Convolutional neural network for the Classification of Portraits and Landscape Images

Haitao Shang

Dataset

WikiArt is an amazing resource containing centuries of artwork. Since such datasets are wonderful for deep learning, Kaggle has hosted a challenge to characterize the 'fingerprints' of various artists. The Kaggle dataset contains metadata and also a set of images that have been resized so that the shorter dimension is 256 pixels. Here,we will construct a CNN model to classify some portraits and landscape images

Initialization

In [1]:
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.python.client import device_lib

import matplotlib.pyplot as plt
import keras
from sklearn import metrics
import numpy as np
import pandas as pd
import os
from time import time
import shutil
import sys
from IPython.display import display, Image


from keras import layers
from keras import models
from keras import optimizers
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.python.eager import context

from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.callbacks import TensorBoard

from keras import backend as K
if K.backend()=='tensorflow':
    K.set_image_data_format('channels_last')

# Config the matlotlib backend as plotting inline in IPython
%matplotlib inline

class TrainValTensorBoard(TensorBoard):
    def __init__(self, log_dir='./logs', **kwargs):
        # Make the original `TensorBoard` log to a subdirectory 'training'
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)

        # Log the validation metrics to a separate subdirectory
        self.val_log_dir = os.path.join(log_dir, 'validation')

    def set_model(self, model):
        # Setup writer for validation metrics
        self.val_writer = tf.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)

    def on_epoch_end(self, epoch, logs=None):
        # Pop the validation logs and handle them separately with
        # `self.val_writer`. Also rename the keys so that they can
        # be plotted on the same figure with the training metrics
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
        for name, value in val_logs.items():
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.val_writer.add_summary(summary, epoch)
        self.val_writer.flush()

        # Pass the remaining logs to `TensorBoard.on_epoch_end`
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
        super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)

    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()

        
class TrainValTensorBoard(TensorBoard):
    def __init__(self, log_dir='./logs', **kwargs):
        # Make the original `TensorBoard` log to a subdirectory 'training'
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)

        # Log the validation metrics to a separate subdirectory
        self.val_log_dir = os.path.join(log_dir, 'validation')

    def set_model(self, model):
        # Setup writer for validation metrics
        self.val_writer = tf.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)

    def on_epoch_end(self, epoch, logs=None):
        # Pop the validation logs and handle them separately with
        # `self.val_writer`. Also rename the keys so that they can
        # be plotted on the same figure with the training metrics
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
        for name, value in val_logs.items():
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.val_writer.add_summary(summary, epoch)
        self.val_writer.flush()

        # Pass the remaining logs to `TensorBoard.on_epoch_end`
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
        super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)

    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()        
        
        
# special matplotlib command for global plot configuration
from matplotlib import rcParams
import matplotlib.cm as cm
import matplotlib as mpl
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D

dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
                (0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
                (0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
                (0.4588235294117647, 0.4392156862745098, 0.7019607843137254),            
                (0.4, 0.6509803921568628, 0.11764705882352941),
                (0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
                (0.6509803921568628, 0.4627450980392157, 0.11372549019607843)]

cmap_set1 = ListedColormap(['#e41a1c', '#377eb8', '#4daf4a'])
dark2_cmap=ListedColormap(dark2_colors)

def set_mpl_params():
    rcParams['figure.figsize'] = (12, 6)
    rcParams['figure.dpi'] = 100
    rcParams['axes.prop_cycle'].by_key()['color'][1]
    rcParams['lines.linewidth'] = 2
    rcParams['axes.facecolor'] = 'white'
    rcParams['font.size'] = 14
    rcParams['patch.edgecolor'] = 'white'
    rcParams['patch.facecolor'] = dark2_colors[0]
    rcParams['font.family'] = 'StixGeneral'

set_mpl_params()
Using TensorFlow backend.

Display some portraits and landscapes in the training, validation, and test set.

(1) Split the pictures in training set, validation set, and test set into two categories -- "portrait" and "landscape".

We creat three new folders -- "new_train", "new_test", and "new_validation". The folder "new_train" contains two subfolders "train_portrait" and "train_landscape", the folder "new_test" contains two subfolders "test_portrait" and "test_landscape", and the folder "new_validation" contains two subfolders "validation_portrait" and "validation_landscape".

In [ ]:
# Split the files in the "train" set into two files "train_portrait" and "train_landscape":
f=open("./train/train.csv","rb")
list=pd.read_csv(f)

train_category = ['train_portrait', 'train_landscape']
for i in train_category:
    os.mkdir(i)

category = ['portrait', 'landscape']
for i in category:
    listnew=list[list["CATEGORY_ID"]==i]
    l=listnew["FILE_ID"].tolist() 
    new_file = 'train_' + str(i)
    for fname in l:
        src = os.path.join('./train', fname)
        dst = os.path.join(new_file, fname)
        shutil.copyfile(src, dst)
        
# Split the files in the "test" set into two files "test_portrait" and "test_landscape":
f=open("./test/test.csv","rb")
list=pd.read_csv(f)

train_category = ['test_portrait', 'test_landscape']
for i in train_category:
    os.mkdir(i)

category = ['portrait', 'landscape']
for i in category:
    listnew=list[list["CATEGORY_ID"]==i]
    l=listnew["FIELD_ID"].tolist() 
    new_file = 'test_' + str(i)
    for fname in l:
        src = os.path.join('./test', fname)
        dst = os.path.join(new_file, fname)
        shutil.copyfile(src, dst)
        
# Split the files in the "validation" set into two files "validation_portrait" and "validation_landscape":
f=open("./validation/validation.csv","rb")
list=pd.read_csv(f)

train_category = ['validation_portrait', 'validation_landscape']
for i in train_category:
    os.mkdir(i)

category = ['portrait', 'landscape']
for i in category:
    listnew=list[list["CATEGORY_ID"]==i]
    l=listnew["FIELD_ID"].tolist() 
    new_file = 'validation_' + str(i)
    for fname in l:
        src = os.path.join('./validation', fname)
        dst = os.path.join(new_file, fname)
        shutil.copyfile(src, dst)
        
#os.mkdir('new_train')
#os.mkdir('new_test')
#os.mkdir('new_validation')

(2) Display the first 5 portraits and the first 5 landscapes

In [2]:
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_dir = './new_train'
validation_dir = 'new_validation'
test_dir = './new_test'

train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')

validation_generator = validation_datagen.flow_from_directory(
        validation_dir,
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')

test_generator = test_datagen.flow_from_directory(
        test_dir,
        target_size=(64, 64),
        batch_size=32,  
        class_mode='binary')
Found 16315 images belonging to 2 classes.
Found 8158 images belonging to 2 classes.
Found 7379 images belonging to 2 classes.
In [14]:
def plot_strip(data, labels, display_index):
    plt.rcParams['figure.figsize'] = (20.0, 20.0)
    f, ax = plt.subplots(nrows=1, ncols=10)
    for i, j in enumerate(display_index):
        ax[i].axis('off')
        ax[i].set_title(['Landscape', 'Portrait'][int(labels[j])], loc='center')
        ax[i].imshow(data[j,:,:], cmap='gray')     
In [15]:
# The function "index_collector" is used to collect the indexes of the first five portraits and the first five landscapes. 
def index_collector(labels):
    portrait_index = []
    landsapce_index = []
    for i in range (0, 20):
        if labels[i] == 1.0:
            portrait_index.append(i)
        else:
            landsapce_index.append(i)
    portrait_index = portrait_index[:5]
    landsapce_index = landsapce_index[:5]
    index_list = portrait_index + landsapce_index
    return index_list 

(A) Display the first 5 portraits and the first 5 landscapes in the training dataset.

In [16]:
for train_data_batch, train_labels_batch in train_generator:
    print('data batch shape:', train_data_batch.shape)
    print('labels batch shape:', train_labels_batch.shape)
    break
    
display_index_train = index_collector(train_labels_batch.tolist())

plot_strip(train_data_batch, train_labels_batch, display_index_train)
data batch shape: (32, 64, 64, 3)
labels batch shape: (32,)

(B) Display the first 5 portraits and the first 5 landscapes in the validation dataset.

In [17]:
for validation_data_batch, validation_labels_batch in validation_generator:
    print('data batch shape:', validation_data_batch.shape)
    print('labels batch shape:', validation_labels_batch.shape)
    break

display_index_validation = index_collector(validation_labels_batch.tolist())
    
plot_strip(validation_data_batch, validation_labels_batch, display_index_validation)
data batch shape: (32, 64, 64, 3)
labels batch shape: (32,)

(C) Display the first 5 portraits and the first 5 landscapes in the test dataset.

In [19]:
for test_data_batch, test_labels_batch in test_generator:
    print('data batch shape:', test_data_batch.shape)
    print('labels batch shape:', test_labels_batch.shape)
    break

display_index_test = index_collector(test_labels_batch.tolist())
    
plot_strip(test_data_batch, test_labels_batch, display_index_test)
data batch shape: (32, 64, 64, 3)
labels batch shape: (32,)

Construct a baseline CNN classifier using Keras

Construct a baseline CNN classifier using Keras for the training set and assess the validation set performance at each epoch. The goal is to correctly classify portraits from landscapes. The resulting performance on the training and validation set will be plotted as a function of epoch using the criteria over which you are optimizing.

A five-layer CNN classifier is used to train the training set and the resulting performance on the training and validation set are plotted as functions of the number of epochs.

In [8]:
def Simple_CNN_Model (activation_function_1, activation_function_2, optimizer_type, epoch_number):
    
    K.clear_session()
    model = models.Sequential(name='FiveLayerModel')
    model.add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
    model.add(layers.MaxPooling2D((2, 2), name='max_pool1'))
    model.add(layers.Conv2D(64, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
    model.add(layers.MaxPooling2D((2, 2), name='max_pool2'))
    model.add(layers.Conv2D(128, (3, 3), padding='same', activation=activation_function_1, name = 'conv3'))
    model.add(layers.MaxPooling2D((2, 2), name='max_pool3'))
    model.add(layers.Conv2D(128, (3, 3), padding='same', activation=activation_function_1, name = 'conv4'))
    model.add(layers.MaxPooling2D((2, 2), name='max_pool4'))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, kernel_initializer='glorot_uniform', activation=activation_function_1, name='fc1'))
    model.add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, name='fc2'))
    model.compile(loss='binary_crossentropy', optimizer = optimizer_type, metrics=['accuracy'])
    
    model.summary()

    # Fit the model
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=epoch_number,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
    )
    test_loss, test_acc = model.evaluate_generator(test_generator, steps=100)
    print('\ntest accuracy:', test_acc)
    print('test loss:', test_loss)
    
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'g-', label='Validation acc')
    plt.xlabel("Num of Epochs")
    plt.ylabel("Accuracy")
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.figure()

    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'g-', label='Validation loss')
    plt.xlabel("Num of Epochs")
    plt.ylabel("Loss")
    plt.title('Training and validation loss')
    plt.legend()
    
    plt.show()
In [34]:
sgd = optimizers.SGD(lr = 0.05, decay=1e-5, momentum=0.9, nesterov=True)
Simple_CNN_Model('relu', 'sigmoid', 'sgd', 60)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 32)        896       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 32)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 64)        18496     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 64)        0         
_________________________________________________________________
conv3 (Conv2D)               (None, 16, 16, 128)       73856     
_________________________________________________________________
max_pool3 (MaxPooling2D)     (None, 8, 8, 128)         0         
_________________________________________________________________
conv4 (Conv2D)               (None, 8, 8, 128)         147584    
_________________________________________________________________
max_pool4 (MaxPooling2D)     (None, 4, 4, 128)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 2048)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               1049088   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 1,290,433
Trainable params: 1,290,433
Non-trainable params: 0
_________________________________________________________________
Epoch 1/60
100/100 [==============================] - 11s 114ms/step - loss: 0.6858 - acc: 0.5573 - val_loss: 0.6769 - val_acc: 0.5312
Epoch 2/60
100/100 [==============================] - 11s 112ms/step - loss: 0.6617 - acc: 0.6816 - val_loss: 0.6441 - val_acc: 0.7075
Epoch 3/60
100/100 [==============================] - 16s 163ms/step - loss: 0.6006 - acc: 0.7316 - val_loss: 0.5419 - val_acc: 0.7456
Epoch 4/60
100/100 [==============================] - 18s 176ms/step - loss: 0.5530 - acc: 0.7194 - val_loss: 0.5447 - val_acc: 0.7250
Epoch 5/60
100/100 [==============================] - 18s 177ms/step - loss: 0.4959 - acc: 0.7550 - val_loss: 0.5620 - val_acc: 0.6969
Epoch 6/60
100/100 [==============================] - 18s 178ms/step - loss: 0.4660 - acc: 0.7675 - val_loss: 0.4479 - val_acc: 0.7760
Epoch 7/60
100/100 [==============================] - 18s 183ms/step - loss: 0.4403 - acc: 0.7854 - val_loss: 0.4300 - val_acc: 0.8056
Epoch 8/60
100/100 [==============================] - 18s 178ms/step - loss: 0.4141 - acc: 0.8009 - val_loss: 0.3811 - val_acc: 0.8206
Epoch 9/60
100/100 [==============================] - 18s 181ms/step - loss: 0.3963 - acc: 0.8169 - val_loss: 0.4289 - val_acc: 0.7969
Epoch 10/60
100/100 [==============================] - 18s 180ms/step - loss: 0.3691 - acc: 0.8306 - val_loss: 0.3518 - val_acc: 0.8462
Epoch 11/60
100/100 [==============================] - 18s 184ms/step - loss: 0.3634 - acc: 0.8369 - val_loss: 0.3304 - val_acc: 0.8554
Epoch 12/60
100/100 [==============================] - 18s 178ms/step - loss: 0.3551 - acc: 0.8394 - val_loss: 0.3659 - val_acc: 0.8350
Epoch 13/60
100/100 [==============================] - 18s 181ms/step - loss: 0.3339 - acc: 0.8447 - val_loss: 0.3011 - val_acc: 0.8644
Epoch 14/60
100/100 [==============================] - 18s 182ms/step - loss: 0.3305 - acc: 0.8506 - val_loss: 0.3143 - val_acc: 0.8656
Epoch 15/60
100/100 [==============================] - 18s 183ms/step - loss: 0.3168 - acc: 0.8603 - val_loss: 0.3104 - val_acc: 0.8631
Epoch 16/60
100/100 [==============================] - 18s 180ms/step - loss: 0.3316 - acc: 0.8600 - val_loss: 0.3014 - val_acc: 0.8698
Epoch 17/60
100/100 [==============================] - 18s 182ms/step - loss: 0.3298 - acc: 0.8622 - val_loss: 0.3030 - val_acc: 0.8756
Epoch 18/60
100/100 [==============================] - 18s 177ms/step - loss: 0.3030 - acc: 0.8686 - val_loss: 0.2728 - val_acc: 0.8869
Epoch 19/60
100/100 [==============================] - 17s 175ms/step - loss: 0.2910 - acc: 0.8681 - val_loss: 0.5265 - val_acc: 0.7638
Epoch 20/60
100/100 [==============================] - 18s 180ms/step - loss: 0.2875 - acc: 0.8728 - val_loss: 0.2804 - val_acc: 0.8938
Epoch 21/60
100/100 [==============================] - 18s 178ms/step - loss: 0.2849 - acc: 0.8797 - val_loss: 0.2969 - val_acc: 0.8811
Epoch 22/60
100/100 [==============================] - 18s 176ms/step - loss: 0.2837 - acc: 0.8772 - val_loss: 0.2753 - val_acc: 0.8812
Epoch 23/60
100/100 [==============================] - 18s 180ms/step - loss: 0.2645 - acc: 0.8846 - val_loss: 0.3707 - val_acc: 0.8450
Epoch 24/60
100/100 [==============================] - 19s 187ms/step - loss: 0.2893 - acc: 0.8812 - val_loss: 0.2482 - val_acc: 0.9012
Epoch 25/60
100/100 [==============================] - 19s 189ms/step - loss: 0.2730 - acc: 0.8841 - val_loss: 0.2650 - val_acc: 0.8881
Epoch 26/60
100/100 [==============================] - 19s 188ms/step - loss: 0.2870 - acc: 0.8806 - val_loss: 0.2416 - val_acc: 0.9024
Epoch 27/60
100/100 [==============================] - 18s 177ms/step - loss: 0.2622 - acc: 0.8966 - val_loss: 0.2786 - val_acc: 0.8825
Epoch 28/60
100/100 [==============================] - 18s 185ms/step - loss: 0.2741 - acc: 0.8850 - val_loss: 0.2501 - val_acc: 0.9012
Epoch 29/60
100/100 [==============================] - 18s 181ms/step - loss: 0.2504 - acc: 0.8956 - val_loss: 0.3310 - val_acc: 0.8444
Epoch 30/60
100/100 [==============================] - 18s 181ms/step - loss: 0.2415 - acc: 0.8995 - val_loss: 0.2570 - val_acc: 0.9044
Epoch 31/60
100/100 [==============================] - 18s 181ms/step - loss: 0.2662 - acc: 0.8878 - val_loss: 0.2875 - val_acc: 0.8780
Epoch 32/60
100/100 [==============================] - 18s 181ms/step - loss: 0.2614 - acc: 0.8891 - val_loss: 0.2323 - val_acc: 0.9131
Epoch 33/60
100/100 [==============================] - 18s 185ms/step - loss: 0.2360 - acc: 0.9012 - val_loss: 0.2374 - val_acc: 0.9056
Epoch 34/60
100/100 [==============================] - 18s 180ms/step - loss: 0.2400 - acc: 0.8969 - val_loss: 0.2413 - val_acc: 0.9075
Epoch 35/60
100/100 [==============================] - 18s 179ms/step - loss: 0.2516 - acc: 0.8973 - val_loss: 0.2319 - val_acc: 0.9062
Epoch 36/60
100/100 [==============================] - 18s 185ms/step - loss: 0.2402 - acc: 0.9034 - val_loss: 0.2387 - val_acc: 0.9086
Epoch 37/60
100/100 [==============================] - 18s 183ms/step - loss: 0.2519 - acc: 0.8978 - val_loss: 0.2325 - val_acc: 0.9187
Epoch 38/60
100/100 [==============================] - 17s 174ms/step - loss: 0.2323 - acc: 0.9061 - val_loss: 0.2682 - val_acc: 0.8975
Epoch 39/60
100/100 [==============================] - 17s 174ms/step - loss: 0.2217 - acc: 0.9094 - val_loss: 0.2260 - val_acc: 0.9100
Epoch 40/60
100/100 [==============================] - 18s 175ms/step - loss: 0.2379 - acc: 0.9084 - val_loss: 0.2389 - val_acc: 0.9031
Epoch 41/60
100/100 [==============================] - 18s 175ms/step - loss: 0.2282 - acc: 0.9097 - val_loss: 0.1980 - val_acc: 0.9199
Epoch 42/60
100/100 [==============================] - 18s 176ms/step - loss: 0.2220 - acc: 0.9150 - val_loss: 0.2762 - val_acc: 0.8806
Epoch 43/60
100/100 [==============================] - 17s 173ms/step - loss: 0.2358 - acc: 0.9087 - val_loss: 0.2335 - val_acc: 0.9012
Epoch 44/60
100/100 [==============================] - 18s 178ms/step - loss: 0.2328 - acc: 0.9081 - val_loss: 0.2274 - val_acc: 0.9062
Epoch 45/60
100/100 [==============================] - 18s 183ms/step - loss: 0.2122 - acc: 0.9166 - val_loss: 0.2109 - val_acc: 0.9219
Epoch 46/60
100/100 [==============================] - 18s 181ms/step - loss: 0.2170 - acc: 0.9100 - val_loss: 0.2088 - val_acc: 0.9174
Epoch 47/60
100/100 [==============================] - 17s 175ms/step - loss: 0.2058 - acc: 0.9175 - val_loss: 0.2164 - val_acc: 0.9213
Epoch 48/60
100/100 [==============================] - 18s 179ms/step - loss: 0.2292 - acc: 0.9084 - val_loss: 0.2665 - val_acc: 0.9031
Epoch 49/60
100/100 [==============================] - 19s 187ms/step - loss: 0.2234 - acc: 0.9131 - val_loss: 0.2048 - val_acc: 0.9194
Epoch 50/60
100/100 [==============================] - 20s 197ms/step - loss: 0.2117 - acc: 0.9116 - val_loss: 0.2188 - val_acc: 0.9163
Epoch 51/60
100/100 [==============================] - 19s 194ms/step - loss: 0.2197 - acc: 0.9168 - val_loss: 0.2186 - val_acc: 0.9149
Epoch 52/60
100/100 [==============================] - 18s 184ms/step - loss: 0.2203 - acc: 0.9070 - val_loss: 0.2096 - val_acc: 0.9219
Epoch 53/60
100/100 [==============================] - 19s 186ms/step - loss: 0.2064 - acc: 0.9194 - val_loss: 0.2094 - val_acc: 0.9156
Epoch 54/60
100/100 [==============================] - 18s 180ms/step - loss: 0.2144 - acc: 0.9137 - val_loss: 0.2272 - val_acc: 0.9150
Epoch 55/60
100/100 [==============================] - 18s 182ms/step - loss: 0.2096 - acc: 0.9203 - val_loss: 0.2101 - val_acc: 0.9219
Epoch 56/60
100/100 [==============================] - 18s 178ms/step - loss: 0.1955 - acc: 0.9213 - val_loss: 0.2054 - val_acc: 0.9200
Epoch 57/60
100/100 [==============================] - 19s 185ms/step - loss: 0.2004 - acc: 0.9181 - val_loss: 0.2133 - val_acc: 0.9186
Epoch 58/60
100/100 [==============================] - 18s 181ms/step - loss: 0.2102 - acc: 0.9181 - val_loss: 0.2471 - val_acc: 0.9006
Epoch 59/60
100/100 [==============================] - 19s 186ms/step - loss: 0.1984 - acc: 0.9194 - val_loss: 0.2647 - val_acc: 0.8975
Epoch 60/60
100/100 [==============================] - 18s 185ms/step - loss: 0.2007 - acc: 0.9172 - val_loss: 0.3945 - val_acc: 0.8306

test accuracy: 0.81875
test loss: 0.41300503686070444

Comments From the pattern of training and validation curves, describe what is good/bad and what you plan to do next to improve the result.
(A) The Good Aspect
I tried different combinations for the "activation_function_1" and "activation_function_2", and several different types of optimizer in Problem 2. It turned the combination -- "activation_function_1 = "relu" and "activation_function_2 = "sigmoid" -- works the best among all combinations I have tried. ANd also, the "stochastic gradient descent" optimizer defined in Problem 2 works well.
(1) In problem 2, I tried 60 epochs in total. Both the training accuracy and validation accuracy increase with the number of epochs, while both the training loss and validation loss decrease with the number of epochs, although they bounce around a little bit.
(2) The decrease in tarining loss and the decrease in validation loss are apparent -- they dropped from a large initial value (around $68\%$) to a small final value (around $20\%$).
(3) Similarly, the increase in tarining accuracy and the increase in validation accuracy are apparent as well -- they rose from a samll initial value (around $54\%$) to a large final value (around $90\%$).
(B) The Bad Aspect
(1) Both the tarining loss and the validation loss reached around $20\%$ after 47 epochs and stalled. Increasing the number of epochs after 47 epochs does NOT decrease either the tarining loss or the validation loss.
(2) Both the training accuracy and the validation accuracy reached around $90\%$ after 24 epochs and stalled. Increasing the number of epochs after 24 epochs does NOT improve either the tarining accuracy or the validation accuracy.
(C) Plan of Improving the Results
To improve the results, I will try different architectures, different parameteres, and different optimizers//regularizations to obtain an optimal CNN. This is what I will do in Problem 4.

Improve the network using a combination of architecture choices, parameter tuning, and experimenting with different optimizers/dropout/regularization/etc.

1. Architecture choice

1.1 The hypothesis/strategy Ststement of Architecture Choice

To find an optimal architecture for the CNN, different number of convolution-subsambling pairs, different number of feature maps, and different number of units for the dense layers will be tried here.

1.2 The Types of Tests for Architecture Choice

Here, the accuracy and loss of training, validation, and test sets obtained from different architectures will be used to choose the optimal architecture. The final values of accuracy and loss and how the accuracy and loss change with epochs will be compared too determine the optimal values.

1.3 The Code and Results for Architecture Choice

1.3.1 Architecture Choice -- Determine the number of convolution-subsambling pairs

In [9]:
# Here, we will try differnet values for the number of convolution-subsambling pairs: 1, 2, 3, 4, and 5. 
def CNN_Architecture_Selector_1 (activation_function_1, activation_function_2, optimizer_type, epoch_number):
    K.clear_session()
    model = [0] * 5
    
    for j in range(0, 5):
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        if j>0:
            model[j].add(layers.Conv2D(64, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
            model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
            if j>1:
                model[j].add(layers.Conv2D(128, (3, 3), padding='same', activation=activation_function_1, name = 'conv3'))
                model[j].add(layers.MaxPooling2D((2, 2), name='max_pool3'))
            if j>2:
                model[j].add(layers.Conv2D(256, (3, 3), padding='same', activation=activation_function_1, name = 'conv4'))
                model[j].add(layers.MaxPooling2D((2, 2), name='max_pool4'))
            if j>3:
                model[j].add(layers.Conv2D(512, (3, 3), padding='same', activation=activation_function_1, name = 'conv5'))
                model[j].add(layers.MaxPooling2D((2, 2), name='max_pool5'))
                
        
        model[j].add(layers.Flatten())
        model[j].add(layers.Dense(512, kernel_initializer='glorot_uniform', activation=activation_function_1, name='fc1'))
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, name='fc2'))
        model[j].compile(loss='binary_crossentropy', optimizer = optimizer_type, metrics=['accuracy'])
    
        model[j].summary()
        
         # Fit the model
        history = model[j].fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=epoch_number,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [41]:
sgd = optimizers.SGD(lr = 0.05, decay=1e-5, momentum=0.9, nesterov=True)
CNN_Architecture_Selector_1('relu', 'sigmoid', 'sgd', 40)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 32)        896       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 32)        0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 32768)             0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               16777728  
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 16,779,137
Trainable params: 16,779,137
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 31s 310ms/step - loss: 0.5868 - acc: 0.6925 - val_loss: 0.5124 - val_acc: 0.7412
Epoch 2/40
100/100 [==============================] - 30s 298ms/step - loss: 0.4930 - acc: 0.7616 - val_loss: 0.4755 - val_acc: 0.7781
Epoch 3/40
100/100 [==============================] - 29s 287ms/step - loss: 0.4688 - acc: 0.7700 - val_loss: 0.4098 - val_acc: 0.8044
Epoch 4/40
100/100 [==============================] - 29s 288ms/step - loss: 0.4162 - acc: 0.8100 - val_loss: 0.3963 - val_acc: 0.8206
Epoch 5/40
100/100 [==============================] - 29s 291ms/step - loss: 0.4095 - acc: 0.8078 - val_loss: 0.3855 - val_acc: 0.8206
Epoch 6/40
100/100 [==============================] - 29s 290ms/step - loss: 0.3899 - acc: 0.8234 - val_loss: 0.3867 - val_acc: 0.8273
Epoch 7/40
100/100 [==============================] - 29s 291ms/step - loss: 0.3841 - acc: 0.8184 - val_loss: 0.3811 - val_acc: 0.8256
Epoch 8/40
100/100 [==============================] - 28s 284ms/step - loss: 0.3691 - acc: 0.8275 - val_loss: 0.3714 - val_acc: 0.8275
Epoch 9/40
100/100 [==============================] - 29s 287ms/step - loss: 0.3574 - acc: 0.8419 - val_loss: 0.3472 - val_acc: 0.8500
Epoch 10/40
100/100 [==============================] - 29s 288ms/step - loss: 0.3636 - acc: 0.8451 - val_loss: 0.3252 - val_acc: 0.8650
Epoch 11/40
100/100 [==============================] - 29s 290ms/step - loss: 0.3461 - acc: 0.8516 - val_loss: 0.3275 - val_acc: 0.8586
Epoch 12/40
100/100 [==============================] - 29s 287ms/step - loss: 0.3444 - acc: 0.8475 - val_loss: 0.3276 - val_acc: 0.8619
Epoch 13/40
100/100 [==============================] - 29s 287ms/step - loss: 0.3335 - acc: 0.8500 - val_loss: 0.3372 - val_acc: 0.8475
Epoch 14/40
100/100 [==============================] - 29s 290ms/step - loss: 0.3292 - acc: 0.8512 - val_loss: 0.3308 - val_acc: 0.8456
Epoch 15/40
100/100 [==============================] - 29s 290ms/step - loss: 0.3170 - acc: 0.8591 - val_loss: 0.3053 - val_acc: 0.8719
Epoch 16/40
100/100 [==============================] - 29s 287ms/step - loss: 0.3118 - acc: 0.8663 - val_loss: 0.3155 - val_acc: 0.8673
Epoch 17/40
100/100 [==============================] - 29s 289ms/step - loss: 0.3106 - acc: 0.8688 - val_loss: 0.3086 - val_acc: 0.8612
Epoch 18/40
100/100 [==============================] - 29s 290ms/step - loss: 0.3224 - acc: 0.8641 - val_loss: 0.2928 - val_acc: 0.8706
Epoch 19/40
100/100 [==============================] - 29s 286ms/step - loss: 0.2989 - acc: 0.8725 - val_loss: 0.2930 - val_acc: 0.8825
Epoch 20/40
100/100 [==============================] - 29s 287ms/step - loss: 0.3038 - acc: 0.8719 - val_loss: 0.2892 - val_acc: 0.8775
Epoch 21/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2874 - acc: 0.8816 - val_loss: 0.2827 - val_acc: 0.8811
Epoch 22/40
100/100 [==============================] - 29s 287ms/step - loss: 0.2874 - acc: 0.8794 - val_loss: 0.4295 - val_acc: 0.8025
Epoch 23/40
100/100 [==============================] - 29s 288ms/step - loss: 0.2788 - acc: 0.8847 - val_loss: 0.2484 - val_acc: 0.9062
Epoch 24/40
100/100 [==============================] - 29s 288ms/step - loss: 0.2847 - acc: 0.8791 - val_loss: 0.2918 - val_acc: 0.8769
Epoch 25/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2754 - acc: 0.8838 - val_loss: 0.2838 - val_acc: 0.8812
Epoch 26/40
100/100 [==============================] - 29s 288ms/step - loss: 0.2727 - acc: 0.8903 - val_loss: 0.3640 - val_acc: 0.8298
Epoch 27/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2455 - acc: 0.9020 - val_loss: 0.2917 - val_acc: 0.8688
Epoch 28/40
100/100 [==============================] - 29s 291ms/step - loss: 0.2632 - acc: 0.8866 - val_loss: 0.2638 - val_acc: 0.8888
Epoch 29/40
100/100 [==============================] - 28s 284ms/step - loss: 0.2744 - acc: 0.8863 - val_loss: 0.2581 - val_acc: 0.8925
Epoch 30/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2653 - acc: 0.8878 - val_loss: 0.2794 - val_acc: 0.8800
Epoch 31/40
100/100 [==============================] - 29s 290ms/step - loss: 0.2662 - acc: 0.8920 - val_loss: 0.2975 - val_acc: 0.8711
Epoch 32/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2387 - acc: 0.9062 - val_loss: 0.2366 - val_acc: 0.9031
Epoch 33/40
100/100 [==============================] - 29s 288ms/step - loss: 0.2507 - acc: 0.9000 - val_loss: 0.2553 - val_acc: 0.8888
Epoch 34/40
100/100 [==============================] - 29s 288ms/step - loss: 0.2506 - acc: 0.8947 - val_loss: 0.2585 - val_acc: 0.8912
Epoch 35/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2518 - acc: 0.8944 - val_loss: 0.2608 - val_acc: 0.9019
Epoch 36/40
100/100 [==============================] - 29s 286ms/step - loss: 0.2449 - acc: 0.8991 - val_loss: 0.2303 - val_acc: 0.9080
Epoch 37/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2425 - acc: 0.8966 - val_loss: 0.2567 - val_acc: 0.8975
Epoch 38/40
100/100 [==============================] - 29s 290ms/step - loss: 0.2415 - acc: 0.9047 - val_loss: 0.2466 - val_acc: 0.9000
Epoch 39/40
100/100 [==============================] - 29s 288ms/step - loss: 0.2431 - acc: 0.9044 - val_loss: 0.2344 - val_acc: 0.9019
Epoch 40/40
100/100 [==============================] - 29s 289ms/step - loss: 0.2203 - acc: 0.9047 - val_loss: 0.2616 - val_acc: 0.8988

test accuracy: 0.8984375
test loss: 0.25702860608696937
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 32)        896       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 32)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 64)        18496     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 64)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 16384)             0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               8389120   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 8,409,025
Trainable params: 8,409,025
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 24s 239ms/step - loss: 0.6501 - acc: 0.6716 - val_loss: 0.5781 - val_acc: 0.7319
Epoch 2/40
100/100 [==============================] - 22s 221ms/step - loss: 0.5438 - acc: 0.7416 - val_loss: 0.4986 - val_acc: 0.7631
Epoch 3/40
100/100 [==============================] - 22s 219ms/step - loss: 0.4897 - acc: 0.7638 - val_loss: 0.4541 - val_acc: 0.7900
Epoch 4/40
100/100 [==============================] - 22s 221ms/step - loss: 0.4686 - acc: 0.7728 - val_loss: 0.4452 - val_acc: 0.7869
Epoch 5/40
100/100 [==============================] - 22s 222ms/step - loss: 0.4307 - acc: 0.7953 - val_loss: 0.4296 - val_acc: 0.8106
Epoch 6/40
100/100 [==============================] - 22s 222ms/step - loss: 0.4173 - acc: 0.8031 - val_loss: 0.3766 - val_acc: 0.8398
Epoch 7/40
100/100 [==============================] - 22s 222ms/step - loss: 0.3972 - acc: 0.8125 - val_loss: 0.3780 - val_acc: 0.8269
Epoch 8/40
100/100 [==============================] - 22s 220ms/step - loss: 0.3736 - acc: 0.8281 - val_loss: 0.3849 - val_acc: 0.8081
Epoch 9/40
100/100 [==============================] - 22s 221ms/step - loss: 0.3711 - acc: 0.8319 - val_loss: 0.3419 - val_acc: 0.8662
Epoch 10/40
100/100 [==============================] - 22s 222ms/step - loss: 0.3667 - acc: 0.8367 - val_loss: 0.3531 - val_acc: 0.8450 - ac - ETA: 3s - loss: 0.
Epoch 11/40
100/100 [==============================] - 22s 223ms/step - loss: 0.3350 - acc: 0.8572 - val_loss: 0.3230 - val_acc: 0.8655
Epoch 12/40
100/100 [==============================] - 22s 222ms/step - loss: 0.3266 - acc: 0.8512 - val_loss: 0.4057 - val_acc: 0.8100
Epoch 13/40
100/100 [==============================] - 22s 223ms/step - loss: 0.3253 - acc: 0.8600 - val_loss: 0.3008 - val_acc: 0.8731
Epoch 14/40
100/100 [==============================] - 22s 224ms/step - loss: 0.3132 - acc: 0.8676 - val_loss: 0.2686 - val_acc: 0.8938
Epoch 15/40
100/100 [==============================] - 22s 221ms/step - loss: 0.3045 - acc: 0.8719 - val_loss: 0.2913 - val_acc: 0.8744
Epoch 16/40
100/100 [==============================] - 22s 220ms/step - loss: 0.2786 - acc: 0.8803 - val_loss: 0.2655 - val_acc: 0.9005
Epoch 17/40
100/100 [==============================] - 22s 221ms/step - loss: 0.2937 - acc: 0.8784 - val_loss: 0.2780 - val_acc: 0.88563s - loss: 0.2
Epoch 18/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2872 - acc: 0.8831 - val_loss: 0.2560 - val_acc: 0.8894
Epoch 19/40
100/100 [==============================] - 22s 220ms/step - loss: 0.2955 - acc: 0.8722 - val_loss: 0.2505 - val_acc: 0.9019
Epoch 20/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2530 - acc: 0.8995 - val_loss: 0.2420 - val_acc: 0.9119
Epoch 21/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2586 - acc: 0.8915 - val_loss: 0.2706 - val_acc: 0.8974
Epoch 22/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2629 - acc: 0.8897 - val_loss: 0.2269 - val_acc: 0.9119 0
Epoch 23/40
100/100 [==============================] - 22s 220ms/step - loss: 0.2685 - acc: 0.8897 - val_loss: 0.2465 - val_acc: 0.8988
Epoch 24/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2475 - acc: 0.8966 - val_loss: 0.2246 - val_acc: 0.9144
Epoch 25/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2535 - acc: 0.9003 - val_loss: 0.2599 - val_acc: 0.8938
Epoch 26/40
100/100 [==============================] - 22s 221ms/step - loss: 0.2494 - acc: 0.8972 - val_loss: 0.2367 - val_acc: 0.90363s - loss:
Epoch 27/40
100/100 [==============================] - 23s 226ms/step - loss: 0.2621 - acc: 0.8959 - val_loss: 0.2343 - val_acc: 0.9094 acc: 0.8
Epoch 28/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2287 - acc: 0.9097 - val_loss: 0.2345 - val_acc: 0.9106
Epoch 29/40
100/100 [==============================] - 22s 221ms/step - loss: 0.2575 - acc: 0.8922 - val_loss: 0.2582 - val_acc: 0.8950
Epoch 30/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2474 - acc: 0.8969 - val_loss: 0.2256 - val_acc: 0.9100
Epoch 31/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2299 - acc: 0.9122 - val_loss: 0.2390 - val_acc: 0.9055
Epoch 32/40
100/100 [==============================] - 23s 226ms/step - loss: 0.2339 - acc: 0.9059 - val_loss: 0.2260 - val_acc: 0.9087
Epoch 33/40
100/100 [==============================] - 22s 221ms/step - loss: 0.2316 - acc: 0.9091 - val_loss: 0.2363 - val_acc: 0.9069
Epoch 34/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2456 - acc: 0.8988 - val_loss: 0.2545 - val_acc: 0.9025
Epoch 35/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2264 - acc: 0.9094 - val_loss: 0.2175 - val_acc: 0.9200
Epoch 36/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2231 - acc: 0.9123 - val_loss: 0.2159 - val_acc: 0.9136
Epoch 37/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2097 - acc: 0.9163 - val_loss: 0.1938 - val_acc: 0.9244
Epoch 38/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2225 - acc: 0.9066 - val_loss: 0.2203 - val_acc: 0.9125 0.
Epoch 39/40
100/100 [==============================] - 22s 219ms/step - loss: 0.2203 - acc: 0.9147 - val_loss: 0.2428 - val_acc: 0.9000
Epoch 40/40
100/100 [==============================] - 22s 222ms/step - loss: 0.2361 - acc: 0.9113 - val_loss: 0.2390 - val_acc: 0.9137

test accuracy: 0.910625
test loss: 0.24269666660577058
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 32)        896       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 32)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 64)        18496     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 64)        0         
_________________________________________________________________
conv3 (Conv2D)               (None, 16, 16, 128)       73856     
_________________________________________________________________
max_pool3 (MaxPooling2D)     (None, 8, 8, 128)         0         
_________________________________________________________________
flatten_3 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               4194816   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 4,288,577
Trainable params: 4,288,577
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 23s 226ms/step - loss: 0.6647 - acc: 0.6538 - val_loss: 0.6241 - val_acc: 0.7125
Epoch 2/40
100/100 [==============================] - 21s 207ms/step - loss: 0.5725 - acc: 0.7303 - val_loss: 0.5478 - val_acc: 0.7256
Epoch 3/40
100/100 [==============================] - 21s 209ms/step - loss: 0.5320 - acc: 0.7278 - val_loss: 0.5461 - val_acc: 0.7200
Epoch 4/40
100/100 [==============================] - 21s 208ms/step - loss: 0.4803 - acc: 0.7734 - val_loss: 0.4686 - val_acc: 0.7825- acc: 0. - ETA: 10s - loss: - ETA: 6s - loss: 0.4852 - acc - ETA: 4s 
Epoch 5/40
100/100 [==============================] - 21s 209ms/step - loss: 0.4788 - acc: 0.7541 - val_loss: 0.4483 - val_acc: 0.7925.75
Epoch 6/40
100/100 [==============================] - 21s 208ms/step - loss: 0.4141 - acc: 0.8069 - val_loss: 0.4101 - val_acc: 0.7985
Epoch 7/40
100/100 [==============================] - 21s 209ms/step - loss: 0.4255 - acc: 0.7950 - val_loss: 0.3964 - val_acc: 0.81504289 - acc: 
Epoch 8/40
100/100 [==============================] - 21s 207ms/step - loss: 0.4058 - acc: 0.8042 - val_loss: 0.3743 - val_acc: 0.8369
Epoch 9/40
100/100 [==============================] - 21s 211ms/step - loss: 0.3794 - acc: 0.8213 - val_loss: 0.3511 - val_acc: 0.8400
Epoch 10/40
100/100 [==============================] - 21s 208ms/step - loss: 0.3800 - acc: 0.8278 - val_loss: 0.3225 - val_acc: 0.8588
Epoch 11/40
100/100 [==============================] - 21s 208ms/step - loss: 0.3494 - acc: 0.8416 - val_loss: 0.3386 - val_acc: 0.8554
Epoch 12/40
100/100 [==============================] - 21s 207ms/step - loss: 0.3389 - acc: 0.8447 - val_loss: 0.3301 - val_acc: 0.8575
Epoch 13/40
100/100 [==============================] - 21s 208ms/step - loss: 0.3417 - acc: 0.8553 - val_loss: 0.3090 - val_acc: 0.8688
Epoch 14/40
100/100 [==============================] - 21s 208ms/step - loss: 0.3323 - acc: 0.8509 - val_loss: 0.3277 - val_acc: 0.8625
Epoch 15/40
100/100 [==============================] - 21s 206ms/step - loss: 0.3159 - acc: 0.8595 - val_loss: 0.3173 - val_acc: 0.8625
Epoch 16/40
100/100 [==============================] - 21s 207ms/step - loss: 0.3293 - acc: 0.8583 - val_loss: 0.3080 - val_acc: 0.8742
Epoch 17/40
100/100 [==============================] - 21s 207ms/step - loss: 0.3063 - acc: 0.8641 - val_loss: 0.3052 - val_acc: 0.8662
Epoch 18/40
100/100 [==============================] - 21s 209ms/step - loss: 0.3070 - acc: 0.8628 - val_loss: 0.2883 - val_acc: 0.8788
Epoch 19/40
100/100 [==============================] - 21s 207ms/step - loss: 0.2905 - acc: 0.8731 - val_loss: 0.2790 - val_acc: 0.8869
Epoch 20/40
100/100 [==============================] - 21s 207ms/step - loss: 0.2910 - acc: 0.8778 - val_loss: 0.3557 - val_acc: 0.8550
Epoch 21/40
100/100 [==============================] - 21s 209ms/step - loss: 0.2863 - acc: 0.8856 - val_loss: 0.4062 - val_acc: 0.8123
Epoch 22/40
100/100 [==============================] - 21s 213ms/step - loss: 0.2810 - acc: 0.8800 - val_loss: 0.2825 - val_acc: 0.8844
Epoch 23/40
100/100 [==============================] - 21s 209ms/step - loss: 0.2846 - acc: 0.8826 - val_loss: 0.2704 - val_acc: 0.8906
Epoch 24/40
100/100 [==============================] - 21s 207ms/step - loss: 0.2738 - acc: 0.8859 - val_loss: 0.2676 - val_acc: 0.8869
Epoch 25/40
100/100 [==============================] - 21s 208ms/step - loss: 0.2652 - acc: 0.8897 - val_loss: 0.2545 - val_acc: 0.9012
Epoch 26/40
100/100 [==============================] - 21s 207ms/step - loss: 0.2705 - acc: 0.8903 - val_loss: 0.3287 - val_acc: 0.8498
Epoch 27/40
100/100 [==============================] - 21s 206ms/step - loss: 0.2627 - acc: 0.8966 - val_loss: 0.2602 - val_acc: 0.9006
Epoch 28/40
100/100 [==============================] - 21s 209ms/step - loss: 0.2633 - acc: 0.8893 - val_loss: 0.2548 - val_acc: 0.8906
Epoch 29/40
100/100 [==============================] - 23s 227ms/step - loss: 0.2558 - acc: 0.8950 - val_loss: 0.2367 - val_acc: 0.9062
Epoch 30/40
100/100 [==============================] - 22s 223ms/step - loss: 0.2653 - acc: 0.8894 - val_loss: 0.2672 - val_acc: 0.9031
Epoch 31/40
100/100 [==============================] - 26s 261ms/step - loss: 0.2429 - acc: 0.9019 - val_loss: 0.2572 - val_acc: 0.9005
Epoch 32/40
100/100 [==============================] - 25s 251ms/step - loss: 0.2628 - acc: 0.8937 - val_loss: 0.2591 - val_acc: 0.9025
Epoch 33/40
100/100 [==============================] - 26s 257ms/step - loss: 0.2449 - acc: 0.9012 - val_loss: 0.2554 - val_acc: 0.9006
Epoch 34/40
100/100 [==============================] - 97s 968ms/step - loss: 0.2312 - acc: 0.9014 - val_loss: 0.2262 - val_acc: 0.9087
Epoch 35/40
100/100 [==============================] - 20s 203ms/step - loss: 0.2496 - acc: 0.9022 - val_loss: 0.2548 - val_acc: 0.9000
Epoch 36/40
100/100 [==============================] - 18s 175ms/step - loss: 0.2437 - acc: 0.9069 - val_loss: 0.2316 - val_acc: 0.9074
Epoch 37/40
100/100 [==============================] - 15s 155ms/step - loss: 0.2424 - acc: 0.9034 - val_loss: 0.2985 - val_acc: 0.8925
Epoch 38/40
100/100 [==============================] - 15s 148ms/step - loss: 0.2193 - acc: 0.9142 - val_loss: 0.2322 - val_acc: 0.9106
Epoch 39/40
100/100 [==============================] - 15s 151ms/step - loss: 0.2385 - acc: 0.9009 - val_loss: 0.2404 - val_acc: 0.9106
Epoch 40/40
100/100 [==============================] - 15s 150ms/step - loss: 0.2460 - acc: 0.8997 - val_loss: 0.3254 - val_acc: 0.8594

test accuracy: 0.8465625
test loss: 0.36982207752764223
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 32)        896       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 32)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 64)        18496     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 64)        0         
_________________________________________________________________
conv3 (Conv2D)               (None, 16, 16, 128)       73856     
_________________________________________________________________
max_pool3 (MaxPooling2D)     (None, 8, 8, 128)         0         
_________________________________________________________________
conv4 (Conv2D)               (None, 8, 8, 256)         295168    
_________________________________________________________________
max_pool4 (MaxPooling2D)     (None, 4, 4, 256)         0         
_________________________________________________________________
flatten_4 (Flatten)          (None, 4096)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               2097664   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 2,486,593
Trainable params: 2,486,593
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 18s 176ms/step - loss: 0.6694 - acc: 0.6496 - val_loss: 0.6406 - val_acc: 0.6994
Epoch 2/40
100/100 [==============================] - 17s 170ms/step - loss: 0.6002 - acc: 0.7025 - val_loss: 0.5649 - val_acc: 0.6987
Epoch 3/40
100/100 [==============================] - 17s 168ms/step - loss: 0.5318 - acc: 0.7266 - val_loss: 0.5293 - val_acc: 0.7406
Epoch 4/40
100/100 [==============================] - 17s 169ms/step - loss: 0.5038 - acc: 0.7534 - val_loss: 0.4745 - val_acc: 0.7606
Epoch 5/40
100/100 [==============================] - 17s 169ms/step - loss: 0.4835 - acc: 0.7647 - val_loss: 0.4298 - val_acc: 0.8081
Epoch 6/40
100/100 [==============================] - 16s 160ms/step - loss: 0.4327 - acc: 0.7850 - val_loss: 0.3917 - val_acc: 0.8204
Epoch 7/40
100/100 [==============================] - 16s 158ms/step - loss: 0.4172 - acc: 0.8028 - val_loss: 0.4230 - val_acc: 0.8031
Epoch 8/40
100/100 [==============================] - 16s 158ms/step - loss: 0.4055 - acc: 0.8109 - val_loss: 0.3754 - val_acc: 0.8263
Epoch 9/40
100/100 [==============================] - 16s 158ms/step - loss: 0.3883 - acc: 0.8175 - val_loss: 0.3482 - val_acc: 0.8512
Epoch 10/40
100/100 [==============================] - 16s 158ms/step - loss: 0.3724 - acc: 0.8228 - val_loss: 0.3339 - val_acc: 0.8475
Epoch 11/40
100/100 [==============================] - 16s 164ms/step - loss: 0.3617 - acc: 0.8269 - val_loss: 0.3526 - val_acc: 0.8373
Epoch 12/40
100/100 [==============================] - 16s 159ms/step - loss: 0.3686 - acc: 0.8381 - val_loss: 0.3495 - val_acc: 0.8381
Epoch 13/40
100/100 [==============================] - 16s 159ms/step - loss: 0.3344 - acc: 0.8450 - val_loss: 0.3450 - val_acc: 0.8469
Epoch 14/40
100/100 [==============================] - 16s 158ms/step - loss: 0.3309 - acc: 0.8461 - val_loss: 0.3080 - val_acc: 0.8675
Epoch 15/40
100/100 [==============================] - 16s 157ms/step - loss: 0.3448 - acc: 0.8450 - val_loss: 0.3159 - val_acc: 0.8681
Epoch 16/40
100/100 [==============================] - 16s 161ms/step - loss: 0.3193 - acc: 0.8578 - val_loss: 0.3226 - val_acc: 0.8511
Epoch 17/40
100/100 [==============================] - 16s 160ms/step - loss: 0.3179 - acc: 0.8559 - val_loss: 0.3018 - val_acc: 0.8725
Epoch 18/40
100/100 [==============================] - 16s 163ms/step - loss: 0.3112 - acc: 0.8628 - val_loss: 0.3315 - val_acc: 0.8531
Epoch 19/40
100/100 [==============================] - 16s 159ms/step - loss: 0.3117 - acc: 0.8659 - val_loss: 0.4334 - val_acc: 0.8013
Epoch 20/40
100/100 [==============================] - 16s 159ms/step - loss: 0.3110 - acc: 0.8662 - val_loss: 0.2862 - val_acc: 0.8769
Epoch 21/40
100/100 [==============================] - 16s 161ms/step - loss: 0.3179 - acc: 0.8619 - val_loss: 0.2978 - val_acc: 0.8780
Epoch 22/40
100/100 [==============================] - 16s 160ms/step - loss: 0.2844 - acc: 0.8744 - val_loss: 0.3326 - val_acc: 0.8594
Epoch 23/40
100/100 [==============================] - 16s 160ms/step - loss: 0.3064 - acc: 0.8678 - val_loss: 0.2767 - val_acc: 0.8844
Epoch 24/40
100/100 [==============================] - 16s 160ms/step - loss: 0.2947 - acc: 0.8730 - val_loss: 0.2537 - val_acc: 0.8919
Epoch 25/40
100/100 [==============================] - 16s 158ms/step - loss: 0.2879 - acc: 0.8784 - val_loss: 0.2939 - val_acc: 0.8806
Epoch 26/40
100/100 [==============================] - 16s 159ms/step - loss: 0.2823 - acc: 0.8806 - val_loss: 0.2849 - val_acc: 0.8899
Epoch 27/40
100/100 [==============================] - 16s 159ms/step - loss: 0.2959 - acc: 0.8756 - val_loss: 0.2702 - val_acc: 0.8856
Epoch 28/40
100/100 [==============================] - 16s 158ms/step - loss: 0.2612 - acc: 0.8853 - val_loss: 0.2703 - val_acc: 0.8938
Epoch 29/40
100/100 [==============================] - 16s 163ms/step - loss: 0.2750 - acc: 0.8793 - val_loss: 0.2841 - val_acc: 0.8869
Epoch 30/40
100/100 [==============================] - 16s 160ms/step - loss: 0.2866 - acc: 0.8844 - val_loss: 0.2800 - val_acc: 0.8756
Epoch 31/40
100/100 [==============================] - 16s 163ms/step - loss: 0.2905 - acc: 0.8831 - val_loss: 0.2825 - val_acc: 0.8817
Epoch 32/40
100/100 [==============================] - 16s 159ms/step - loss: 0.2574 - acc: 0.8928 - val_loss: 0.2892 - val_acc: 0.8788
Epoch 33/40
100/100 [==============================] - 16s 162ms/step - loss: 0.2734 - acc: 0.8834 - val_loss: 0.2643 - val_acc: 0.8906
Epoch 34/40
100/100 [==============================] - 16s 161ms/step - loss: 0.2895 - acc: 0.8775 - val_loss: 0.2959 - val_acc: 0.8700
Epoch 35/40
100/100 [==============================] - 16s 160ms/step - loss: 0.2562 - acc: 0.8892 - val_loss: 0.2475 - val_acc: 0.9025
Epoch 36/40
100/100 [==============================] - 16s 162ms/step - loss: 0.2730 - acc: 0.8834 - val_loss: 0.2342 - val_acc: 0.9080
Epoch 37/40
100/100 [==============================] - 17s 170ms/step - loss: 0.2582 - acc: 0.8906 - val_loss: 0.2637 - val_acc: 0.8888
Epoch 38/40
100/100 [==============================] - 16s 162ms/step - loss: 0.2342 - acc: 0.9041 - val_loss: 0.2573 - val_acc: 0.8994
Epoch 39/40
100/100 [==============================] - 16s 164ms/step - loss: 0.2788 - acc: 0.8859 - val_loss: 0.2863 - val_acc: 0.8731
Epoch 40/40
100/100 [==============================] - 16s 160ms/step - loss: 0.2496 - acc: 0.8937 - val_loss: 0.3113 - val_acc: 0.8581

test accuracy: 0.846875
test loss: 0.3431397096812725
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 32)        896       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 32)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 64)        18496     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 64)        0         
_________________________________________________________________
conv3 (Conv2D)               (None, 16, 16, 128)       73856     
_________________________________________________________________
max_pool3 (MaxPooling2D)     (None, 8, 8, 128)         0         
_________________________________________________________________
conv4 (Conv2D)               (None, 8, 8, 256)         295168    
_________________________________________________________________
max_pool4 (MaxPooling2D)     (None, 4, 4, 256)         0         
_________________________________________________________________
conv5 (Conv2D)               (None, 4, 4, 512)         1180160   
_________________________________________________________________
max_pool5 (MaxPooling2D)     (None, 2, 2, 512)         0         
_________________________________________________________________
flatten_5 (Flatten)          (None, 2048)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               1049088   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 2,618,177
Trainable params: 2,618,177
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 24s 241ms/step - loss: 0.6860 - acc: 0.6106 - val_loss: 0.6767 - val_acc: 0.7338
Epoch 2/40
100/100 [==============================] - 23s 233ms/step - loss: 0.6627 - acc: 0.7126 - val_loss: 0.6437 - val_acc: 0.7075
Epoch 3/40
100/100 [==============================] - 24s 237ms/step - loss: 0.6022 - acc: 0.7175 - val_loss: 0.5815 - val_acc: 0.6669
Epoch 4/40
100/100 [==============================] - 24s 237ms/step - loss: 0.5619 - acc: 0.7084 - val_loss: 0.5635 - val_acc: 0.6813
Epoch 5/40
100/100 [==============================] - 24s 240ms/step - loss: 0.5399 - acc: 0.7272 - val_loss: 0.5430 - val_acc: 0.7194
Epoch 6/40
100/100 [==============================] - 24s 238ms/step - loss: 0.5142 - acc: 0.7422 - val_loss: 0.4408 - val_acc: 0.7816
Epoch 7/40
100/100 [==============================] - 24s 238ms/step - loss: 0.4925 - acc: 0.7594 - val_loss: 0.4099 - val_acc: 0.8075
Epoch 8/40
100/100 [==============================] - 24s 238ms/step - loss: 0.5655 - acc: 0.7095 - val_loss: 0.4347 - val_acc: 0.7881
Epoch 9/40
100/100 [==============================] - 24s 239ms/step - loss: 4.7670 - acc: 0.5722 - val_loss: 8.1195 - val_acc: 0.4963
Epoch 10/40
100/100 [==============================] - 24s 238ms/step - loss: 8.6333 - acc: 0.4644 - val_loss: 8.4217 - val_acc: 0.4775
Epoch 11/40
100/100 [==============================] - 24s 239ms/step - loss: 8.3663 - acc: 0.4809 - val_loss: 8.3717 - val_acc: 0.4806
Epoch 12/40
100/100 [==============================] - 24s 243ms/step - loss: 8.5224 - acc: 0.4713 - val_loss: 8.1094 - val_acc: 0.4969
Epoch 13/40
100/100 [==============================] - 23s 235ms/step - loss: 8.6010 - acc: 0.4664 - val_loss: 8.4821 - val_acc: 0.4738
Epoch 14/40
100/100 [==============================] - 24s 235ms/step - loss: 8.7087 - acc: 0.4597 - val_loss: 8.3307 - val_acc: 0.4831
Epoch 15/40
100/100 [==============================] - 24s 237ms/step - loss: 8.6081 - acc: 0.4659 - val_loss: 8.5124 - val_acc: 0.4719
Epoch 16/40
100/100 [==============================] - 24s 236ms/step - loss: 8.4167 - acc: 0.4778 - val_loss: 8.5230 - val_acc: 0.4712
Epoch 17/40
100/100 [==============================] - 24s 238ms/step - loss: 8.4419 - acc: 0.4763 - val_loss: 8.1594 - val_acc: 0.4938
Epoch 18/40
100/100 [==============================] - 25s 254ms/step - loss: 8.7924 - acc: 0.4545 - val_loss: 8.4217 - val_acc: 0.4775
Epoch 19/40
100/100 [==============================] - 26s 257ms/step - loss: 8.4570 - acc: 0.4753 - val_loss: 8.1900 - val_acc: 0.4919
Epoch 20/40
100/100 [==============================] - 26s 255ms/step - loss: 8.6786 - acc: 0.4616 - val_loss: 8.6131 - val_acc: 0.4656
Epoch 21/40
100/100 [==============================] - 24s 240ms/step - loss: 8.7228 - acc: 0.4588 - val_loss: 8.0489 - val_acc: 0.5006
Epoch 22/40
100/100 [==============================] - 23s 230ms/step - loss: 8.2301 - acc: 0.4856 - val_loss: 8.3109 - val_acc: 0.4844
Epoch 23/40
100/100 [==============================] - 23s 230ms/step - loss: 8.4668 - acc: 0.4747 - val_loss: 8.6736 - val_acc: 0.4619
Epoch 24/40
100/100 [==============================] - 23s 231ms/step - loss: 8.5023 - acc: 0.4725 - val_loss: 8.3814 - val_acc: 0.4800
Epoch 25/40
100/100 [==============================] - 23s 232ms/step - loss: 8.8146 - acc: 0.4531 - val_loss: 8.2102 - val_acc: 0.4906
Epoch 26/40
100/100 [==============================] - 23s 232ms/step - loss: 8.7541 - acc: 0.4569 - val_loss: 8.2709 - val_acc: 0.4869
Epoch 27/40
100/100 [==============================] - 23s 232ms/step - loss: 8.4318 - acc: 0.4769 - val_loss: 8.4318 - val_acc: 0.4769
Epoch 28/40
100/100 [==============================] - 23s 231ms/step - loss: 8.5364 - acc: 0.4704 - val_loss: 8.6534 - val_acc: 0.4631
Epoch 29/40
100/100 [==============================] - 23s 230ms/step - loss: 8.6736 - acc: 0.4619 - val_loss: 8.4116 - val_acc: 0.4781
Epoch 30/40
100/100 [==============================] - 23s 231ms/step - loss: 8.4721 - acc: 0.4744 - val_loss: 8.1799 - val_acc: 0.4925
Epoch 31/40
100/100 [==============================] - 24s 235ms/step - loss: 8.5577 - acc: 0.4691 - val_loss: 8.4726 - val_acc: 0.4743
Epoch 32/40
100/100 [==============================] - 23s 235ms/step - loss: 8.5980 - acc: 0.4666 - val_loss: 8.4419 - val_acc: 0.4763
Epoch 33/40
100/100 [==============================] - 23s 232ms/step - loss: 8.7995 - acc: 0.4541 - val_loss: 8.4016 - val_acc: 0.4788
Epoch 34/40
100/100 [==============================] - 24s 242ms/step - loss: 8.6926 - acc: 0.4607 - val_loss: 8.2907 - val_acc: 0.4856
Epoch 35/40
100/100 [==============================] - 24s 239ms/step - loss: 8.5627 - acc: 0.4688 - val_loss: 8.5426 - val_acc: 0.4700
Epoch 36/40
100/100 [==============================] - 24s 241ms/step - loss: 8.5224 - acc: 0.4713 - val_loss: 8.1902 - val_acc: 0.4919
Epoch 37/40
100/100 [==============================] - 24s 238ms/step - loss: 8.5174 - acc: 0.4716 - val_loss: 8.1900 - val_acc: 0.4919
Epoch 38/40
100/100 [==============================] - 24s 238ms/step - loss: 8.5950 - acc: 0.4667 - val_loss: 8.2404 - val_acc: 0.4888
Epoch 39/40
100/100 [==============================] - 24s 237ms/step - loss: 8.6836 - acc: 0.4612 - val_loss: 8.4116 - val_acc: 0.4781
Epoch 40/40
100/100 [==============================] - 25s 252ms/step - loss: 8.4267 - acc: 0.4772 - val_loss: 8.4116 - val_acc: 0.4781

test accuracy: 0.4678125
test loss: 8.577848987579346

1.3.2 Architecture Choice -- Determine the number of feature maps

Here, we set the number of convolution-subsambling pairs as 2. We will try differnet values for the number of feature maps for the two convolutional layers. The paris of the first and second convolutional layers we will are: (16, 32), (32, 64), (48, 96), (64, 128), and (80, 160).

In [8]:
def CNN_Architecture_Selector_2 (activation_function_1, activation_function_2, optimizer_type, epoch_number):
    K.clear_session()
    model = [0] * 5
    
    for j in range(0, 5):
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(j*16+16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        
        model[j].add(layers.Conv2D(j*32+32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        
        model[j].add(layers.Flatten())
        model[j].add(layers.Dense(512, kernel_initializer='glorot_uniform', activation=activation_function_1, name='fc1'))
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, name='fc2'))
        model[j].compile(loss='binary_crossentropy', optimizer = optimizer_type, metrics=['accuracy'])
    
        model[j].summary()
        
         # Fit the model
        history = model[j].fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=epoch_number,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [9]:
sgd = optimizers.SGD(lr = 0.05, decay=1e-5, momentum=0.9, nesterov=True)
CNN_Architecture_Selector_2('relu', 'sigmoid', 'sgd', 40)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               4194816   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 4,200,417
Trainable params: 4,200,417
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 19s 186ms/step - loss: 0.6108 - acc: 0.6856 - val_loss: 0.5337 - val_acc: 0.7394
Epoch 2/40
100/100 [==============================] - 14s 138ms/step - loss: 0.5178 - acc: 0.7475 - val_loss: 0.5010 - val_acc: 0.7512
Epoch 3/40
100/100 [==============================] - 13s 128ms/step - loss: 0.4852 - acc: 0.7616 - val_loss: 0.4474 - val_acc: 0.8075
Epoch 4/40
100/100 [==============================] - 14s 142ms/step - loss: 0.4458 - acc: 0.7947 - val_loss: 0.4294 - val_acc: 0.8063
Epoch 5/40
100/100 [==============================] - 11s 113ms/step - loss: 0.4202 - acc: 0.8041 - val_loss: 0.4065 - val_acc: 0.8037
Epoch 6/40
100/100 [==============================] - 10s 97ms/step - loss: 0.4075 - acc: 0.8091 - val_loss: 0.3881 - val_acc: 0.8229
Epoch 7/40
100/100 [==============================] - 8s 84ms/step - loss: 0.4104 - acc: 0.8031 - val_loss: 0.3706 - val_acc: 0.8387
Epoch 8/40
100/100 [==============================] - 9s 89ms/step - loss: 0.3742 - acc: 0.8353 - val_loss: 0.3389 - val_acc: 0.8456
Epoch 9/40
100/100 [==============================] - 9s 87ms/step - loss: 0.3655 - acc: 0.8350 - val_loss: 0.3583 - val_acc: 0.8306
Epoch 10/40
100/100 [==============================] - 9s 86ms/step - loss: 0.3539 - acc: 0.8409 - val_loss: 0.3389 - val_acc: 0.8594
Epoch 11/40
100/100 [==============================] - 10s 99ms/step - loss: 0.3533 - acc: 0.8356 - val_loss: 0.3460 - val_acc: 0.8498
Epoch 12/40
100/100 [==============================] - 10s 96ms/step - loss: 0.3451 - acc: 0.8450 - val_loss: 0.3185 - val_acc: 0.8681
Epoch 13/40
100/100 [==============================] - 9s 87ms/step - loss: 0.3429 - acc: 0.8519 - val_loss: 0.3335 - val_acc: 0.8531
Epoch 14/40
100/100 [==============================] - 9s 86ms/step - loss: 0.3300 - acc: 0.8528 - val_loss: 0.3296 - val_acc: 0.8594
Epoch 15/40
100/100 [==============================] - 10s 96ms/step - loss: 0.3093 - acc: 0.8642 - val_loss: 0.2993 - val_acc: 0.8619
Epoch 16/40
100/100 [==============================] - 9s 86ms/step - loss: 0.3065 - acc: 0.8694 - val_loss: 0.3000 - val_acc: 0.8642
Epoch 17/40
100/100 [==============================] - 9s 89ms/step - loss: 0.3132 - acc: 0.8616 - val_loss: 0.2829 - val_acc: 0.8812
Epoch 18/40
100/100 [==============================] - 8s 85ms/step - loss: 0.2955 - acc: 0.8809 - val_loss: 0.2730 - val_acc: 0.8900
Epoch 19/40
100/100 [==============================] - 9s 92ms/step - loss: 0.3017 - acc: 0.8762 - val_loss: 0.2603 - val_acc: 0.8988
Epoch 20/40
100/100 [==============================] - 8s 84ms/step - loss: 0.2743 - acc: 0.8821 - val_loss: 0.2655 - val_acc: 0.8956
Epoch 21/40
100/100 [==============================] - 9s 86ms/step - loss: 0.2978 - acc: 0.8756 - val_loss: 0.2677 - val_acc: 0.8855
Epoch 22/40
100/100 [==============================] - 9s 85ms/step - loss: 0.2870 - acc: 0.8781 - val_loss: 0.2578 - val_acc: 0.8950
Epoch 23/40
100/100 [==============================] - 8s 85ms/step - loss: 0.2741 - acc: 0.8844 - val_loss: 0.2643 - val_acc: 0.8925
Epoch 24/40
100/100 [==============================] - 8s 84ms/step - loss: 0.2781 - acc: 0.8884 - val_loss: 0.2664 - val_acc: 0.9038
Epoch 25/40
100/100 [==============================] - 8s 84ms/step - loss: 0.2658 - acc: 0.8903 - val_loss: 0.3056 - val_acc: 0.8794
Epoch 26/40
100/100 [==============================] - 9s 86ms/step - loss: 0.2609 - acc: 0.8956 - val_loss: 0.2672 - val_acc: 0.8961
Epoch 27/40
100/100 [==============================] - 8s 84ms/step - loss: 0.2598 - acc: 0.8978 - val_loss: 0.3168 - val_acc: 0.8556
Epoch 28/40
100/100 [==============================] - 9s 85ms/step - loss: 0.2728 - acc: 0.8878 - val_loss: 0.2365 - val_acc: 0.9075
Epoch 29/40
100/100 [==============================] - 9s 85ms/step - loss: 0.2389 - acc: 0.9046 - val_loss: 0.2535 - val_acc: 0.8994
Epoch 30/40
100/100 [==============================] - 9s 86ms/step - loss: 0.2545 - acc: 0.8891 - val_loss: 0.2294 - val_acc: 0.9069
Epoch 31/40
100/100 [==============================] - 9s 85ms/step - loss: 0.2432 - acc: 0.9044 - val_loss: 0.2601 - val_acc: 0.8980
Epoch 32/40
100/100 [==============================] - 9s 93ms/step - loss: 0.2550 - acc: 0.8944 - val_loss: 0.2507 - val_acc: 0.9113
Epoch 33/40
100/100 [==============================] - 9s 93ms/step - loss: 0.2404 - acc: 0.9078 - val_loss: 0.2340 - val_acc: 0.8994
Epoch 34/40
100/100 [==============================] - 9s 92ms/step - loss: 0.2451 - acc: 0.9028 - val_loss: 0.2611 - val_acc: 0.8862
Epoch 35/40
100/100 [==============================] - 9s 87ms/step - loss: 0.2547 - acc: 0.9030 - val_loss: 0.2378 - val_acc: 0.9044
Epoch 36/40
100/100 [==============================] - 9s 87ms/step - loss: 0.2262 - acc: 0.9094 - val_loss: 0.2294 - val_acc: 0.9161
Epoch 37/40
100/100 [==============================] - 9s 88ms/step - loss: 0.2287 - acc: 0.9097 - val_loss: 0.3808 - val_acc: 0.8400
Epoch 38/40
100/100 [==============================] - 9s 93ms/step - loss: 0.2376 - acc: 0.9038 - val_loss: 0.2715 - val_acc: 0.8781
Epoch 39/40
100/100 [==============================] - 11s 112ms/step - loss: 0.2344 - acc: 0.8984 - val_loss: 0.2250 - val_acc: 0.9113
Epoch 40/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2271 - acc: 0.9122 - val_loss: 0.3085 - val_acc: 0.8612

test accuracy: 0.8578125
test loss: 0.339173599332571
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 32)        896       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 32)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 64)        18496     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 64)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 16384)             0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               8389120   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 8,409,025
Trainable params: 8,409,025
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 17s 166ms/step - loss: 0.6274 - acc: 0.6859 - val_loss: 0.5493 - val_acc: 0.7488
Epoch 2/40
100/100 [==============================] - 16s 161ms/step - loss: 0.5186 - acc: 0.7594 - val_loss: 0.4791 - val_acc: 0.7588
Epoch 3/40
100/100 [==============================] - 15s 151ms/step - loss: 0.4899 - acc: 0.7578 - val_loss: 0.4353 - val_acc: 0.8031
Epoch 4/40
100/100 [==============================] - 15s 153ms/step - loss: 0.4529 - acc: 0.7853 - val_loss: 0.4460 - val_acc: 0.7950
Epoch 5/40
100/100 [==============================] - 15s 153ms/step - loss: 0.4210 - acc: 0.8059 - val_loss: 0.4261 - val_acc: 0.7894
Epoch 6/40
100/100 [==============================] - 15s 152ms/step - loss: 0.4097 - acc: 0.8088 - val_loss: 0.3571 - val_acc: 0.8548
Epoch 7/40
100/100 [==============================] - 15s 153ms/step - loss: 0.3827 - acc: 0.8269 - val_loss: 0.3645 - val_acc: 0.8350
Epoch 8/40
100/100 [==============================] - 15s 151ms/step - loss: 0.3719 - acc: 0.8319 - val_loss: 0.3427 - val_acc: 0.8425
Epoch 9/40
100/100 [==============================] - 15s 154ms/step - loss: 0.3584 - acc: 0.8419 - val_loss: 0.3722 - val_acc: 0.8263
Epoch 10/40
100/100 [==============================] - 15s 153ms/step - loss: 0.3493 - acc: 0.8410 - val_loss: 0.3554 - val_acc: 0.8413
Epoch 11/40
100/100 [==============================] - 15s 154ms/step - loss: 0.3332 - acc: 0.8521 - val_loss: 0.3753 - val_acc: 0.8292
Epoch 12/40
100/100 [==============================] - 15s 150ms/step - loss: 0.3206 - acc: 0.8522 - val_loss: 0.3020 - val_acc: 0.8800
Epoch 13/40
100/100 [==============================] - 16s 161ms/step - loss: 0.3043 - acc: 0.8750 - val_loss: 0.2881 - val_acc: 0.8800
Epoch 14/40
100/100 [==============================] - 16s 162ms/step - loss: 0.3045 - acc: 0.8669 - val_loss: 0.3518 - val_acc: 0.8413
Epoch 15/40
100/100 [==============================] - 17s 174ms/step - loss: 0.3127 - acc: 0.8678 - val_loss: 0.2886 - val_acc: 0.8606
Epoch 16/40
100/100 [==============================] - 17s 165ms/step - loss: 0.2841 - acc: 0.8762 - val_loss: 0.2743 - val_acc: 0.8861
Epoch 17/40
100/100 [==============================] - 16s 163ms/step - loss: 0.2603 - acc: 0.8931 - val_loss: 0.2604 - val_acc: 0.8956
Epoch 18/40
100/100 [==============================] - 17s 168ms/step - loss: 0.2923 - acc: 0.8878 - val_loss: 0.2618 - val_acc: 0.8975
Epoch 19/40
100/100 [==============================] - 17s 166ms/step - loss: 0.2724 - acc: 0.8916 - val_loss: 0.2735 - val_acc: 0.9006
Epoch 20/40
100/100 [==============================] - 16s 159ms/step - loss: 0.2887 - acc: 0.8806 - val_loss: 0.2419 - val_acc: 0.9006
Epoch 21/40
100/100 [==============================] - 16s 162ms/step - loss: 0.2643 - acc: 0.8900 - val_loss: 0.2698 - val_acc: 0.8980
Epoch 22/40
100/100 [==============================] - 16s 162ms/step - loss: 0.2579 - acc: 0.8978 - val_loss: 0.2513 - val_acc: 0.9000
Epoch 23/40
100/100 [==============================] - 17s 167ms/step - loss: 0.2558 - acc: 0.8900 - val_loss: 0.2389 - val_acc: 0.9056
Epoch 24/40
100/100 [==============================] - 17s 171ms/step - loss: 0.2565 - acc: 0.8964 - val_loss: 0.2814 - val_acc: 0.8794
Epoch 25/40
100/100 [==============================] - 16s 163ms/step - loss: 0.2536 - acc: 0.8931 - val_loss: 0.3002 - val_acc: 0.8781
Epoch 26/40
100/100 [==============================] - 16s 163ms/step - loss: 0.2604 - acc: 0.8925 - val_loss: 0.2389 - val_acc: 0.9099
Epoch 27/40
100/100 [==============================] - 16s 159ms/step - loss: 0.2459 - acc: 0.8959 - val_loss: 0.2953 - val_acc: 0.8862
Epoch 28/40
100/100 [==============================] - 15s 154ms/step - loss: 0.2374 - acc: 0.9047 - val_loss: 0.2685 - val_acc: 0.8950
Epoch 29/40
100/100 [==============================] - 15s 154ms/step - loss: 0.2453 - acc: 0.9034 - val_loss: 0.2293 - val_acc: 0.9094
Epoch 30/40
100/100 [==============================] - 15s 153ms/step - loss: 0.2340 - acc: 0.9051 - val_loss: 0.2727 - val_acc: 0.8956
Epoch 31/40
100/100 [==============================] - 15s 154ms/step - loss: 0.2265 - acc: 0.9097 - val_loss: 0.2383 - val_acc: 0.9043
Epoch 32/40
100/100 [==============================] - 15s 154ms/step - loss: 0.2275 - acc: 0.9031 - val_loss: 0.2472 - val_acc: 0.9012
Epoch 33/40
100/100 [==============================] - 16s 158ms/step - loss: 0.2328 - acc: 0.9121 - val_loss: 0.2282 - val_acc: 0.9131
Epoch 34/40
100/100 [==============================] - 16s 156ms/step - loss: 0.2215 - acc: 0.9131 - val_loss: 0.4490 - val_acc: 0.8287
Epoch 35/40
100/100 [==============================] - 16s 156ms/step - loss: 0.2475 - acc: 0.9000 - val_loss: 0.2317 - val_acc: 0.9062
Epoch 36/40
100/100 [==============================] - 16s 159ms/step - loss: 0.2434 - acc: 0.8984 - val_loss: 0.2211 - val_acc: 0.9080
Epoch 37/40
100/100 [==============================] - 15s 155ms/step - loss: 0.2268 - acc: 0.9145 - val_loss: 0.2192 - val_acc: 0.9200
Epoch 38/40
100/100 [==============================] - 16s 156ms/step - loss: 0.2293 - acc: 0.9072 - val_loss: 0.2310 - val_acc: 0.9094
Epoch 39/40
100/100 [==============================] - 16s 156ms/step - loss: 0.2250 - acc: 0.9116 - val_loss: 0.2329 - val_acc: 0.9069
Epoch 40/40
100/100 [==============================] - 16s 158ms/step - loss: 0.2186 - acc: 0.9125 - val_loss: 0.2567 - val_acc: 0.8894

test accuracy: 0.89
test loss: 0.2673813809454441
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 48)        1344      
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 48)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 96)        41568     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 96)        0         
_________________________________________________________________
flatten_3 (Flatten)          (None, 24576)             0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               12583424  
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 12,626,849
Trainable params: 12,626,849
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 26s 260ms/step - loss: 0.6145 - acc: 0.6986 - val_loss: 0.5479 - val_acc: 0.7338
Epoch 2/40
100/100 [==============================] - 25s 252ms/step - loss: 0.5175 - acc: 0.7497 - val_loss: 0.4723 - val_acc: 0.7719
Epoch 3/40
100/100 [==============================] - 25s 252ms/step - loss: 0.4692 - acc: 0.7791 - val_loss: 0.4305 - val_acc: 0.8044
Epoch 4/40
100/100 [==============================] - 25s 251ms/step - loss: 0.4384 - acc: 0.7878 - val_loss: 0.5083 - val_acc: 0.7394
Epoch 5/40
100/100 [==============================] - 25s 252ms/step - loss: 0.4271 - acc: 0.8037 - val_loss: 0.4198 - val_acc: 0.7981
Epoch 6/40
100/100 [==============================] - 25s 250ms/step - loss: 0.3952 - acc: 0.8194 - val_loss: 0.3598 - val_acc: 0.8411
Epoch 7/40
100/100 [==============================] - 25s 254ms/step - loss: 0.3783 - acc: 0.8316 - val_loss: 0.3468 - val_acc: 0.8506
Epoch 8/40
100/100 [==============================] - 25s 254ms/step - loss: 0.3710 - acc: 0.8344 - val_loss: 0.3506 - val_acc: 0.8525
Epoch 9/40
100/100 [==============================] - 26s 261ms/step - loss: 0.3354 - acc: 0.8609 - val_loss: 0.3156 - val_acc: 0.8694
Epoch 10/40
100/100 [==============================] - 28s 275ms/step - loss: 0.3502 - acc: 0.8456 - val_loss: 0.3974 - val_acc: 0.8087
Epoch 11/40
100/100 [==============================] - 27s 268ms/step - loss: 0.3199 - acc: 0.8706 - val_loss: 0.3074 - val_acc: 0.8755
Epoch 12/40
100/100 [==============================] - 26s 264ms/step - loss: 0.3260 - acc: 0.8569 - val_loss: 0.3046 - val_acc: 0.8644
Epoch 13/40
100/100 [==============================] - 26s 263ms/step - loss: 0.3196 - acc: 0.8594 - val_loss: 0.3490 - val_acc: 0.8469
Epoch 14/40
100/100 [==============================] - 27s 266ms/step - loss: 0.3031 - acc: 0.8701 - val_loss: 0.2882 - val_acc: 0.8856
Epoch 15/40
100/100 [==============================] - 27s 267ms/step - loss: 0.2950 - acc: 0.8775 - val_loss: 0.2773 - val_acc: 0.8762
Epoch 16/40
100/100 [==============================] - 27s 268ms/step - loss: 0.2861 - acc: 0.8839 - val_loss: 0.2859 - val_acc: 0.8786
Epoch 17/40
100/100 [==============================] - 26s 264ms/step - loss: 0.2831 - acc: 0.8794 - val_loss: 0.3034 - val_acc: 0.8738
Epoch 18/40
100/100 [==============================] - 27s 268ms/step - loss: 0.2732 - acc: 0.8847 - val_loss: 0.2695 - val_acc: 0.8838
Epoch 19/40
100/100 [==============================] - 27s 269ms/step - loss: 0.2701 - acc: 0.8831 - val_loss: 0.2636 - val_acc: 0.8925
Epoch 20/40
100/100 [==============================] - 27s 268ms/step - loss: 0.2718 - acc: 0.8875 - val_loss: 0.2446 - val_acc: 0.9038
Epoch 21/40
100/100 [==============================] - 27s 270ms/step - loss: 0.2757 - acc: 0.8863 - val_loss: 0.2794 - val_acc: 0.8836
Epoch 22/40
100/100 [==============================] - 27s 268ms/step - loss: 0.2546 - acc: 0.8947 - val_loss: 0.2753 - val_acc: 0.8844
Epoch 23/40
100/100 [==============================] - 27s 267ms/step - loss: 0.2451 - acc: 0.8949 - val_loss: 0.2482 - val_acc: 0.9094
Epoch 24/40
100/100 [==============================] - 27s 267ms/step - loss: 0.2545 - acc: 0.8941 - val_loss: 0.2825 - val_acc: 0.8819
Epoch 25/40
100/100 [==============================] - 27s 269ms/step - loss: 0.2551 - acc: 0.8984 - val_loss: 0.2271 - val_acc: 0.9100
Epoch 26/40
100/100 [==============================] - 26s 262ms/step - loss: 0.2523 - acc: 0.8981 - val_loss: 0.2636 - val_acc: 0.8805
Epoch 27/40
100/100 [==============================] - 27s 269ms/step - loss: 0.2378 - acc: 0.9028 - val_loss: 0.2581 - val_acc: 0.8906
Epoch 28/40
100/100 [==============================] - 27s 273ms/step - loss: 0.2445 - acc: 0.9044 - val_loss: 0.2245 - val_acc: 0.9050
Epoch 29/40
100/100 [==============================] - 27s 273ms/step - loss: 0.2333 - acc: 0.9038 - val_loss: 0.2297 - val_acc: 0.9137
Epoch 30/40
100/100 [==============================] - 28s 282ms/step - loss: 0.2571 - acc: 0.8981 - val_loss: 0.2609 - val_acc: 0.8944
Epoch 31/40
100/100 [==============================] - 28s 277ms/step - loss: 0.2366 - acc: 0.9066 - val_loss: 0.2391 - val_acc: 0.9105
Epoch 32/40
100/100 [==============================] - 28s 277ms/step - loss: 0.2289 - acc: 0.9078 - val_loss: 0.2182 - val_acc: 0.9113
Epoch 33/40
100/100 [==============================] - 27s 268ms/step - loss: 0.2321 - acc: 0.9078 - val_loss: 0.2277 - val_acc: 0.9131
Epoch 34/40
100/100 [==============================] - 27s 275ms/step - loss: 0.2376 - acc: 0.9004 - val_loss: 0.2618 - val_acc: 0.8956
Epoch 35/40
100/100 [==============================] - 27s 270ms/step - loss: 0.2198 - acc: 0.9109 - val_loss: 0.2438 - val_acc: 0.8981
Epoch 36/40
100/100 [==============================] - 27s 270ms/step - loss: 0.2331 - acc: 0.9075 - val_loss: 0.2617 - val_acc: 0.9011
Epoch 37/40
100/100 [==============================] - 27s 273ms/step - loss: 0.2336 - acc: 0.9072 - val_loss: 0.2282 - val_acc: 0.9131
Epoch 38/40
100/100 [==============================] - 27s 270ms/step - loss: 0.2007 - acc: 0.9234 - val_loss: 0.2227 - val_acc: 0.9144
Epoch 39/40
100/100 [==============================] - 27s 268ms/step - loss: 0.2178 - acc: 0.9088 - val_loss: 0.2890 - val_acc: 0.8875
Epoch 40/40
100/100 [==============================] - 27s 270ms/step - loss: 0.2296 - acc: 0.9081 - val_loss: 0.2331 - val_acc: 0.9056

test accuracy: 0.910625
test loss: 0.23277076236903668
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 64)        1792      
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 64)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 128)       73856     
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 128)       0         
_________________________________________________________________
flatten_4 (Flatten)          (None, 32768)             0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               16777728  
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 16,853,889
Trainable params: 16,853,889
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 39s 395ms/step - loss: 0.6236 - acc: 0.6925 - val_loss: 0.5365 - val_acc: 0.7538
Epoch 2/40
100/100 [==============================] - 38s 378ms/step - loss: 0.5361 - acc: 0.7378 - val_loss: 0.5164 - val_acc: 0.7294
Epoch 3/40
100/100 [==============================] - 38s 380ms/step - loss: 0.4833 - acc: 0.7662 - val_loss: 0.4304 - val_acc: 0.8075
Epoch 4/40
100/100 [==============================] - 39s 390ms/step - loss: 0.4526 - acc: 0.7878 - val_loss: 0.4441 - val_acc: 0.7762
Epoch 5/40
100/100 [==============================] - 39s 386ms/step - loss: 0.4167 - acc: 0.8072 - val_loss: 0.3956 - val_acc: 0.8263
Epoch 6/40
100/100 [==============================] - 38s 383ms/step - loss: 0.4077 - acc: 0.8103 - val_loss: 0.4256 - val_acc: 0.7972
Epoch 7/40
100/100 [==============================] - 38s 380ms/step - loss: 0.3930 - acc: 0.8181 - val_loss: 0.3728 - val_acc: 0.8275
Epoch 8/40
100/100 [==============================] - 39s 389ms/step - loss: 0.3729 - acc: 0.8256 - val_loss: 0.4846 - val_acc: 0.7525
Epoch 9/40
100/100 [==============================] - 38s 378ms/step - loss: 0.3415 - acc: 0.8518 - val_loss: 0.3055 - val_acc: 0.8744
Epoch 10/40
100/100 [==============================] - 38s 384ms/step - loss: 0.3320 - acc: 0.8522 - val_loss: 0.3292 - val_acc: 0.8556
Epoch 11/40
100/100 [==============================] - 38s 379ms/step - loss: 0.3343 - acc: 0.8525 - val_loss: 0.2941 - val_acc: 0.8805
Epoch 12/40
100/100 [==============================] - 39s 386ms/step - loss: 0.3140 - acc: 0.8681 - val_loss: 0.3016 - val_acc: 0.8750
Epoch 13/40
100/100 [==============================] - 38s 383ms/step - loss: 0.3126 - acc: 0.8631 - val_loss: 0.2980 - val_acc: 0.8875
Epoch 14/40
100/100 [==============================] - 38s 375ms/step - loss: 0.3039 - acc: 0.8684 - val_loss: 0.2782 - val_acc: 0.8812
Epoch 15/40
100/100 [==============================] - 37s 366ms/step - loss: 0.2904 - acc: 0.8815 - val_loss: 0.2807 - val_acc: 0.8900
Epoch 16/40
100/100 [==============================] - 37s 367ms/step - loss: 0.2814 - acc: 0.8816 - val_loss: 0.2900 - val_acc: 0.8842
Epoch 17/40
100/100 [==============================] - 36s 365ms/step - loss: 0.2629 - acc: 0.8928 - val_loss: 0.2476 - val_acc: 0.8994
Epoch 18/40
100/100 [==============================] - 36s 365ms/step - loss: 0.2762 - acc: 0.8859 - val_loss: 0.2552 - val_acc: 0.8956
Epoch 19/40
100/100 [==============================] - 36s 364ms/step - loss: 0.2614 - acc: 0.8937 - val_loss: 0.2951 - val_acc: 0.8838
Epoch 20/40
100/100 [==============================] - 36s 361ms/step - loss: 0.2704 - acc: 0.8863 - val_loss: 0.2274 - val_acc: 0.9187
Epoch 21/40
100/100 [==============================] - 36s 363ms/step - loss: 0.2736 - acc: 0.8959 - val_loss: 0.2418 - val_acc: 0.9043
Epoch 22/40
100/100 [==============================] - 36s 364ms/step - loss: 0.2534 - acc: 0.9016 - val_loss: 0.2559 - val_acc: 0.8981
Epoch 23/40
100/100 [==============================] - 37s 366ms/step - loss: 0.2420 - acc: 0.9056 - val_loss: 0.2417 - val_acc: 0.9056
Epoch 24/40
100/100 [==============================] - 36s 365ms/step - loss: 0.2616 - acc: 0.8897 - val_loss: 0.2807 - val_acc: 0.8812
Epoch 25/40
100/100 [==============================] - 36s 365ms/step - loss: 0.2536 - acc: 0.8977 - val_loss: 0.2376 - val_acc: 0.9100
Epoch 26/40
100/100 [==============================] - 36s 365ms/step - loss: 0.2325 - acc: 0.9028 - val_loss: 0.2606 - val_acc: 0.9024
Epoch 27/40
100/100 [==============================] - 36s 364ms/step - loss: 0.2396 - acc: 0.9057 - val_loss: 0.2604 - val_acc: 0.9062
Epoch 28/40
100/100 [==============================] - 37s 365ms/step - loss: 0.2325 - acc: 0.9084 - val_loss: 0.2272 - val_acc: 0.9031
Epoch 29/40
100/100 [==============================] - 36s 365ms/step - loss: 0.2418 - acc: 0.9059 - val_loss: 0.2341 - val_acc: 0.9094
Epoch 30/40
100/100 [==============================] - 36s 362ms/step - loss: 0.2463 - acc: 0.9034 - val_loss: 0.2255 - val_acc: 0.9113
Epoch 31/40
100/100 [==============================] - 39s 388ms/step - loss: 0.2308 - acc: 0.9091 - val_loss: 0.2397 - val_acc: 0.9105
Epoch 32/40
100/100 [==============================] - 39s 386ms/step - loss: 0.2405 - acc: 0.9025 - val_loss: 0.2057 - val_acc: 0.9225
Epoch 33/40
100/100 [==============================] - 39s 394ms/step - loss: 0.2351 - acc: 0.9025 - val_loss: 0.2203 - val_acc: 0.9156
Epoch 34/40
100/100 [==============================] - 38s 383ms/step - loss: 0.2243 - acc: 0.9091 - val_loss: 0.2337 - val_acc: 0.9125
Epoch 35/40
100/100 [==============================] - 37s 371ms/step - loss: 0.2181 - acc: 0.9198 - val_loss: 0.2349 - val_acc: 0.9156
Epoch 36/40
100/100 [==============================] - 38s 377ms/step - loss: 0.2270 - acc: 0.9122 - val_loss: 0.2323 - val_acc: 0.9061
Epoch 37/40
100/100 [==============================] - 43s 429ms/step - loss: 0.2156 - acc: 0.9178 - val_loss: 0.2211 - val_acc: 0.9125
Epoch 38/40
100/100 [==============================] - 48s 479ms/step - loss: 0.2216 - acc: 0.9094 - val_loss: 0.2297 - val_acc: 0.9119
Epoch 39/40
100/100 [==============================] - 48s 481ms/step - loss: 0.2240 - acc: 0.9128 - val_loss: 0.2333 - val_acc: 0.9125
Epoch 40/40
100/100 [==============================] - 50s 496ms/step - loss: 0.2313 - acc: 0.9096 - val_loss: 0.2522 - val_acc: 0.9006

test accuracy: 0.905
test loss: 0.24873999908566474
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 80)        2240      
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 80)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 160)       115360    
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 160)       0         
_________________________________________________________________
flatten_5 (Flatten)          (None, 40960)             0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               20972032  
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 21,090,145
Trainable params: 21,090,145
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 68s 679ms/step - loss: 0.6108 - acc: 0.6994 - val_loss: 0.5312 - val_acc: 0.7394
Epoch 2/40
100/100 [==============================] - 65s 652ms/step - loss: 0.5142 - acc: 0.7438 - val_loss: 0.4943 - val_acc: 0.7562
Epoch 3/40
100/100 [==============================] - 65s 653ms/step - loss: 0.4847 - acc: 0.7553 - val_loss: 0.4541 - val_acc: 0.7963
Epoch 4/40
100/100 [==============================] - 65s 653ms/step - loss: 0.4455 - acc: 0.7909 - val_loss: 0.4846 - val_acc: 0.7569
Epoch 5/40
100/100 [==============================] - 66s 657ms/step - loss: 0.4233 - acc: 0.7997 - val_loss: 0.4806 - val_acc: 0.7788
Epoch 6/40
100/100 [==============================] - 65s 651ms/step - loss: 0.3887 - acc: 0.8164 - val_loss: 0.3601 - val_acc: 0.8411
Epoch 7/40
100/100 [==============================] - 66s 664ms/step - loss: 0.3875 - acc: 0.8175 - val_loss: 0.3388 - val_acc: 0.8525
Epoch 8/40
100/100 [==============================] - 66s 660ms/step - loss: 0.3558 - acc: 0.8434 - val_loss: 0.4438 - val_acc: 0.7906
Epoch 9/40
100/100 [==============================] - 64s 642ms/step - loss: 0.3437 - acc: 0.8438 - val_loss: 0.3211 - val_acc: 0.8644
Epoch 10/40
100/100 [==============================] - 66s 660ms/step - loss: 0.3471 - acc: 0.8494 - val_loss: 0.3114 - val_acc: 0.8725
Epoch 11/40
100/100 [==============================] - 65s 652ms/step - loss: 0.3273 - acc: 0.8547 - val_loss: 0.3158 - val_acc: 0.8642
Epoch 12/40
100/100 [==============================] - 66s 660ms/step - loss: 0.3266 - acc: 0.8553 - val_loss: 0.3157 - val_acc: 0.8669
Epoch 13/40
100/100 [==============================] - 66s 657ms/step - loss: 0.3133 - acc: 0.8647 - val_loss: 0.3253 - val_acc: 0.8731
Epoch 14/40
100/100 [==============================] - 65s 646ms/step - loss: 0.3061 - acc: 0.8691 - val_loss: 0.3198 - val_acc: 0.8600
Epoch 15/40
100/100 [==============================] - 67s 667ms/step - loss: 0.2948 - acc: 0.8749 - val_loss: 0.2740 - val_acc: 0.8831
Epoch 16/40
100/100 [==============================] - 65s 646ms/step - loss: 0.2903 - acc: 0.8784 - val_loss: 0.3086 - val_acc: 0.8761
Epoch 17/40
100/100 [==============================] - 65s 653ms/step - loss: 0.2850 - acc: 0.8803 - val_loss: 0.2966 - val_acc: 0.8675
Epoch 18/40
100/100 [==============================] - 66s 660ms/step - loss: 0.2794 - acc: 0.8856 - val_loss: 0.2795 - val_acc: 0.8844
Epoch 19/40
100/100 [==============================] - 65s 654ms/step - loss: 0.2664 - acc: 0.8903 - val_loss: 0.2350 - val_acc: 0.9025
Epoch 20/40
100/100 [==============================] - 66s 658ms/step - loss: 0.2794 - acc: 0.8831 - val_loss: 0.2463 - val_acc: 0.9006
Epoch 21/40
100/100 [==============================] - 65s 655ms/step - loss: 0.2464 - acc: 0.8912 - val_loss: 0.2833 - val_acc: 0.8880
Epoch 22/40
100/100 [==============================] - 65s 651ms/step - loss: 0.2591 - acc: 0.8894 - val_loss: 0.2518 - val_acc: 0.9062
Epoch 23/40
100/100 [==============================] - 66s 658ms/step - loss: 0.2580 - acc: 0.8962 - val_loss: 0.2563 - val_acc: 0.8919
Epoch 24/40
100/100 [==============================] - 65s 648ms/step - loss: 0.2436 - acc: 0.8966 - val_loss: 0.2321 - val_acc: 0.9031
Epoch 25/40
100/100 [==============================] - 65s 652ms/step - loss: 0.2629 - acc: 0.8891 - val_loss: 0.2322 - val_acc: 0.9012
Epoch 26/40
100/100 [==============================] - 66s 657ms/step - loss: 0.2371 - acc: 0.9069 - val_loss: 0.2920 - val_acc: 0.8792
Epoch 27/40
100/100 [==============================] - 65s 654ms/step - loss: 0.2389 - acc: 0.9031 - val_loss: 0.2391 - val_acc: 0.9100
Epoch 28/40
100/100 [==============================] - 65s 650ms/step - loss: 0.2517 - acc: 0.8944 - val_loss: 0.2446 - val_acc: 0.9019
Epoch 29/40
100/100 [==============================] - 67s 674ms/step - loss: 0.2461 - acc: 0.9009 - val_loss: 0.2283 - val_acc: 0.9069
Epoch 30/40
100/100 [==============================] - 65s 652ms/step - loss: 0.2312 - acc: 0.9081 - val_loss: 0.2567 - val_acc: 0.8931
Epoch 31/40
100/100 [==============================] - 64s 644ms/step - loss: 0.2386 - acc: 0.9028 - val_loss: 0.2168 - val_acc: 0.9111
Epoch 32/40
100/100 [==============================] - 65s 653ms/step - loss: 0.2110 - acc: 0.9163 - val_loss: 0.2430 - val_acc: 0.9100
Epoch 33/40
100/100 [==============================] - 65s 650ms/step - loss: 0.2445 - acc: 0.9022 - val_loss: 0.2600 - val_acc: 0.8981
Epoch 34/40
100/100 [==============================] - 57s 574ms/step - loss: 0.2323 - acc: 0.9124 - val_loss: 0.2377 - val_acc: 0.9025
Epoch 35/40
100/100 [==============================] - 59s 592ms/step - loss: 0.2303 - acc: 0.9025 - val_loss: 0.2415 - val_acc: 0.9062
Epoch 36/40
100/100 [==============================] - 67s 667ms/step - loss: 0.2328 - acc: 0.9116 - val_loss: 0.2450 - val_acc: 0.9061
Epoch 37/40
100/100 [==============================] - 66s 655ms/step - loss: 0.2262 - acc: 0.9103 - val_loss: 0.2226 - val_acc: 0.9025
Epoch 38/40
100/100 [==============================] - 65s 653ms/step - loss: 0.2209 - acc: 0.9125 - val_loss: 0.2037 - val_acc: 0.9375
Epoch 39/40
100/100 [==============================] - 65s 655ms/step - loss: 0.2089 - acc: 0.9150 - val_loss: 0.2505 - val_acc: 0.8912
Epoch 40/40
100/100 [==============================] - 67s 670ms/step - loss: 0.2235 - acc: 0.9091 - val_loss: 0.2511 - val_acc: 0.9006

test accuracy: 0.9103125
test loss: 0.24727061174809933

1.3.3 Architecture Choice -- Determine the number of units for the dense layers

Here, we set the number of convolution-subsambling pairs as 2, and use 16 maps in the first convolutional layer and 32 maps in the second convolutional layer. I will try different number of units for the dense layers: 16, 32, 64, 128, 256, 512, and 1024.

In [19]:
def CNN_Architecture_Selector_3 (activation_function_1, activation_function_2, optimizer_type, epoch_number):
    K.clear_session()
    model = [0] * 7
    
    for j in range(0, 7):
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        model[j].add(layers.Flatten())

        model[j].add(layers.Dense(2**(j+4), kernel_initializer='glorot_uniform', activation=activation_function_1, name='fc1'))
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, name='fc2'))
        model[j].compile(loss='binary_crossentropy', optimizer = optimizer_type, metrics=['accuracy'])
    
        model[j].summary()
        
        # Fit the model
        history = model[j].fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=epoch_number,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [20]:
sgd = optimizers.SGD(lr = 0.05, decay=1e-5, momentum=0.9, nesterov=True)
CNN_Architecture_Selector_3('relu', 'sigmoid', 'sgd', 40)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 16)                131088    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 17        
=================================================================
Total params: 136,193
Trainable params: 136,193
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 9s 85ms/step - loss: 0.6689 - acc: 0.6556 - val_loss: 0.6198 - val_acc: 0.7081
Epoch 2/40
100/100 [==============================] - 7s 73ms/step - loss: 0.5573 - acc: 0.7491 - val_loss: 0.4950 - val_acc: 0.7631
Epoch 3/40
100/100 [==============================] - 7s 72ms/step - loss: 0.4964 - acc: 0.7605 - val_loss: 0.4799 - val_acc: 0.7837
Epoch 4/40
100/100 [==============================] - 7s 71ms/step - loss: 0.4711 - acc: 0.7728 - val_loss: 0.4472 - val_acc: 0.7863
Epoch 5/40
100/100 [==============================] - 8s 81ms/step - loss: 0.4309 - acc: 0.7978 - val_loss: 0.4073 - val_acc: 0.8131
Epoch 6/40
100/100 [==============================] - 7s 69ms/step - loss: 0.4107 - acc: 0.8037 - val_loss: 0.3966 - val_acc: 0.8166
Epoch 7/40
100/100 [==============================] - 8s 79ms/step - loss: 0.3802 - acc: 0.8325 - val_loss: 0.4677 - val_acc: 0.7712
Epoch 8/40
100/100 [==============================] - 7s 65ms/step - loss: 0.3974 - acc: 0.8173 - val_loss: 0.3622 - val_acc: 0.8381
Epoch 9/40
100/100 [==============================] - 7s 66ms/step - loss: 0.3885 - acc: 0.8278 - val_loss: 0.4032 - val_acc: 0.8069
Epoch 10/40
100/100 [==============================] - 6s 64ms/step - loss: 0.3622 - acc: 0.8406 - val_loss: 0.3452 - val_acc: 0.8369
Epoch 11/40
100/100 [==============================] - 7s 67ms/step - loss: 0.3650 - acc: 0.8388 - val_loss: 0.3259 - val_acc: 0.8548
Epoch 12/40
100/100 [==============================] - 8s 75ms/step - loss: 0.3526 - acc: 0.8387 - val_loss: 0.3144 - val_acc: 0.8706
Epoch 13/40
100/100 [==============================] - 7s 65ms/step - loss: 0.3219 - acc: 0.8594 - val_loss: 0.3809 - val_acc: 0.8281
Epoch 14/40
100/100 [==============================] - 7s 73ms/step - loss: 0.3407 - acc: 0.8563 - val_loss: 0.3237 - val_acc: 0.8631
Epoch 15/40
100/100 [==============================] - 7s 67ms/step - loss: 0.3062 - acc: 0.8778 - val_loss: 0.2847 - val_acc: 0.8869
Epoch 16/40
100/100 [==============================] - 8s 76ms/step - loss: 0.3190 - acc: 0.8659 - val_loss: 0.4000 - val_acc: 0.8141
Epoch 17/40
100/100 [==============================] - 8s 76ms/step - loss: 0.2884 - acc: 0.8769 - val_loss: 0.2917 - val_acc: 0.8894
Epoch 18/40
100/100 [==============================] - 8s 75ms/step - loss: 0.3137 - acc: 0.8685 - val_loss: 0.2709 - val_acc: 0.8800
Epoch 19/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2910 - acc: 0.8781 - val_loss: 0.2758 - val_acc: 0.8825
Epoch 20/40
100/100 [==============================] - 7s 71ms/step - loss: 0.2906 - acc: 0.8725 - val_loss: 0.3171 - val_acc: 0.8725
Epoch 21/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2771 - acc: 0.8916 - val_loss: 0.2861 - val_acc: 0.8892
Epoch 22/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2613 - acc: 0.8847 - val_loss: 0.2790 - val_acc: 0.8900
Epoch 23/40
100/100 [==============================] - 7s 66ms/step - loss: 0.2556 - acc: 0.8909 - val_loss: 0.3026 - val_acc: 0.8744
Epoch 24/40
100/100 [==============================] - 6s 65ms/step - loss: 0.2751 - acc: 0.8889 - val_loss: 0.2447 - val_acc: 0.9038
Epoch 25/40
100/100 [==============================] - 7s 67ms/step - loss: 0.2864 - acc: 0.8828 - val_loss: 0.2586 - val_acc: 0.9019
Epoch 26/40
100/100 [==============================] - 7s 69ms/step - loss: 0.2474 - acc: 0.9053 - val_loss: 0.2949 - val_acc: 0.8811
Epoch 27/40
100/100 [==============================] - 6s 65ms/step - loss: 0.2761 - acc: 0.8897 - val_loss: 0.2276 - val_acc: 0.9031
Epoch 28/40
100/100 [==============================] - 7s 67ms/step - loss: 0.2424 - acc: 0.9006 - val_loss: 0.2786 - val_acc: 0.8938
Epoch 29/40
100/100 [==============================] - 6s 64ms/step - loss: 0.2535 - acc: 0.8972 - val_loss: 0.2434 - val_acc: 0.8994
Epoch 30/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2527 - acc: 0.9033 - val_loss: 0.3358 - val_acc: 0.8512
Epoch 31/40
100/100 [==============================] - 7s 66ms/step - loss: 0.2600 - acc: 0.8962 - val_loss: 0.2414 - val_acc: 0.9036
Epoch 32/40
100/100 [==============================] - 7s 71ms/step - loss: 0.2447 - acc: 0.8969 - val_loss: 0.2411 - val_acc: 0.8975
Epoch 33/40
100/100 [==============================] - 8s 76ms/step - loss: 0.2432 - acc: 0.9066 - val_loss: 0.2233 - val_acc: 0.9181
Epoch 34/40
100/100 [==============================] - 7s 74ms/step - loss: 0.2180 - acc: 0.9159 - val_loss: 0.2604 - val_acc: 0.9000
Epoch 35/40
100/100 [==============================] - 7s 67ms/step - loss: 0.2415 - acc: 0.8991 - val_loss: 0.2651 - val_acc: 0.8944
Epoch 36/40
100/100 [==============================] - 7s 71ms/step - loss: 0.2355 - acc: 0.9084 - val_loss: 0.2049 - val_acc: 0.9180
Epoch 37/40
100/100 [==============================] - 7s 66ms/step - loss: 0.2455 - acc: 0.9031 - val_loss: 0.3247 - val_acc: 0.8750
Epoch 38/40
100/100 [==============================] - 7s 66ms/step - loss: 0.2487 - acc: 0.8944 - val_loss: 0.2136 - val_acc: 0.9144
Epoch 39/40
100/100 [==============================] - 7s 71ms/step - loss: 0.2218 - acc: 0.9087 - val_loss: 0.2700 - val_acc: 0.8862
Epoch 40/40
100/100 [==============================] - 7s 65ms/step - loss: 0.2383 - acc: 0.9025 - val_loss: 0.2465 - val_acc: 0.9012

test accuracy: 0.91375
test loss: 0.22367523226886987
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 32)                262176    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 33        
=================================================================
Total params: 267,297
Trainable params: 267,297
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 9s 89ms/step - loss: 0.6646 - acc: 0.6200 - val_loss: 0.6402 - val_acc: 0.6637
Epoch 2/40
100/100 [==============================] - 8s 76ms/step - loss: 0.5796 - acc: 0.7122 - val_loss: 0.5347 - val_acc: 0.7381
Epoch 3/40
100/100 [==============================] - 7s 73ms/step - loss: 0.5274 - acc: 0.7384 - val_loss: 0.4820 - val_acc: 0.7831
Epoch 4/40
100/100 [==============================] - 8s 77ms/step - loss: 0.5000 - acc: 0.7582 - val_loss: 0.4551 - val_acc: 0.7800
Epoch 5/40
100/100 [==============================] - 7s 69ms/step - loss: 0.4669 - acc: 0.7837 - val_loss: 0.5067 - val_acc: 0.7475
Epoch 6/40
100/100 [==============================] - 8s 78ms/step - loss: 0.4289 - acc: 0.8047 - val_loss: 0.4090 - val_acc: 0.8123
Epoch 7/40
100/100 [==============================] - 7s 71ms/step - loss: 0.4130 - acc: 0.8113 - val_loss: 0.4258 - val_acc: 0.8087
Epoch 8/40
100/100 [==============================] - 7s 71ms/step - loss: 0.4041 - acc: 0.8139 - val_loss: 0.3854 - val_acc: 0.8325
Epoch 9/40
100/100 [==============================] - 7s 73ms/step - loss: 0.3813 - acc: 0.8319 - val_loss: 0.3570 - val_acc: 0.8406
Epoch 10/40
100/100 [==============================] - 7s 67ms/step - loss: 0.3784 - acc: 0.8266 - val_loss: 0.3390 - val_acc: 0.8681
Epoch 11/40
100/100 [==============================] - 7s 71ms/step - loss: 0.3392 - acc: 0.8600 - val_loss: 0.3086 - val_acc: 0.8717
Epoch 12/40
100/100 [==============================] - 7s 70ms/step - loss: 0.3287 - acc: 0.8594 - val_loss: 0.3132 - val_acc: 0.8706
Epoch 13/40
100/100 [==============================] - 7s 71ms/step - loss: 0.3181 - acc: 0.8637 - val_loss: 0.3249 - val_acc: 0.8650
Epoch 14/40
100/100 [==============================] - 7s 74ms/step - loss: 0.3333 - acc: 0.8631 - val_loss: 0.2849 - val_acc: 0.8900
Epoch 15/40
100/100 [==============================] - 7s 71ms/step - loss: 0.3150 - acc: 0.8675 - val_loss: 0.3153 - val_acc: 0.8625
Epoch 16/40
100/100 [==============================] - 7s 72ms/step - loss: 0.3102 - acc: 0.8678 - val_loss: 0.2840 - val_acc: 0.8892
Epoch 17/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2867 - acc: 0.8764 - val_loss: 0.3043 - val_acc: 0.8688
Epoch 18/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2821 - acc: 0.8819 - val_loss: 0.2491 - val_acc: 0.9000
Epoch 19/40
100/100 [==============================] - 7s 71ms/step - loss: 0.3024 - acc: 0.8781 - val_loss: 0.2751 - val_acc: 0.8888
Epoch 20/40
100/100 [==============================] - 8s 76ms/step - loss: 0.2796 - acc: 0.8825 - val_loss: 0.2743 - val_acc: 0.8931
Epoch 21/40
100/100 [==============================] - 7s 70ms/step - loss: 0.2840 - acc: 0.8841 - val_loss: 0.3224 - val_acc: 0.8605
Epoch 22/40
100/100 [==============================] - 7s 75ms/step - loss: 0.2712 - acc: 0.8900 - val_loss: 0.3014 - val_acc: 0.8800
Epoch 23/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2647 - acc: 0.8912 - val_loss: 0.6156 - val_acc: 0.7444
Epoch 24/40
100/100 [==============================] - 7s 73ms/step - loss: 0.2631 - acc: 0.8931 - val_loss: 0.2384 - val_acc: 0.9056
Epoch 25/40
100/100 [==============================] - 7s 65ms/step - loss: 0.2679 - acc: 0.8822 - val_loss: 0.2481 - val_acc: 0.9069
Epoch 26/40
100/100 [==============================] - 7s 67ms/step - loss: 0.2615 - acc: 0.8937 - val_loss: 0.2765 - val_acc: 0.8911
Epoch 27/40
100/100 [==============================] - 7s 67ms/step - loss: 0.2508 - acc: 0.9003 - val_loss: 0.2461 - val_acc: 0.9006
Epoch 28/40
100/100 [==============================] - 7s 66ms/step - loss: 0.2562 - acc: 0.9006 - val_loss: 0.2222 - val_acc: 0.9137
Epoch 29/40
100/100 [==============================] - 7s 74ms/step - loss: 0.2440 - acc: 0.8997 - val_loss: 0.2732 - val_acc: 0.8875
Epoch 30/40
100/100 [==============================] - 8s 77ms/step - loss: 0.2471 - acc: 0.9012 - val_loss: 0.2362 - val_acc: 0.9062
Epoch 31/40
100/100 [==============================] - 8s 75ms/step - loss: 0.2437 - acc: 0.8991 - val_loss: 0.2295 - val_acc: 0.9011
Epoch 32/40
100/100 [==============================] - 7s 66ms/step - loss: 0.2456 - acc: 0.9016 - val_loss: 0.2615 - val_acc: 0.8856
Epoch 33/40
100/100 [==============================] - 6s 64ms/step - loss: 0.2403 - acc: 0.9062 - val_loss: 0.2209 - val_acc: 0.9062
Epoch 34/40
100/100 [==============================] - 8s 75ms/step - loss: 0.2436 - acc: 0.8959 - val_loss: 0.2647 - val_acc: 0.8950
Epoch 35/40
100/100 [==============================] - 7s 71ms/step - loss: 0.2465 - acc: 0.9028 - val_loss: 0.2696 - val_acc: 0.8988
Epoch 36/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2407 - acc: 0.9039 - val_loss: 0.2689 - val_acc: 0.8892
Epoch 37/40
100/100 [==============================] - 7s 65ms/step - loss: 0.2338 - acc: 0.9071 - val_loss: 0.2300 - val_acc: 0.9050
Epoch 38/40
100/100 [==============================] - 7s 70ms/step - loss: 0.2225 - acc: 0.9113 - val_loss: 0.2304 - val_acc: 0.9069
Epoch 39/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2290 - acc: 0.9094 - val_loss: 0.2434 - val_acc: 0.9062
Epoch 40/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2363 - acc: 0.8997 - val_loss: 0.2289 - val_acc: 0.9081

test accuracy: 0.9078125
test loss: 0.22470478739589453
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_3 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 64)                524352    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 65        
=================================================================
Total params: 529,505
Trainable params: 529,505
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 9s 92ms/step - loss: 0.6311 - acc: 0.6528 - val_loss: 0.5753 - val_acc: 0.6863
Epoch 2/40
100/100 [==============================] - 11s 113ms/step - loss: 0.5513 - acc: 0.7284 - val_loss: 0.5024 - val_acc: 0.7725
Epoch 3/40
100/100 [==============================] - 13s 130ms/step - loss: 0.4980 - acc: 0.7534 - val_loss: 0.4593 - val_acc: 0.7831
Epoch 4/40
100/100 [==============================] - 10s 99ms/step - loss: 0.4557 - acc: 0.7782 - val_loss: 0.4387 - val_acc: 0.8156
Epoch 5/40
100/100 [==============================] - 9s 92ms/step - loss: 0.4453 - acc: 0.7897 - val_loss: 0.4356 - val_acc: 0.7925
Epoch 6/40
100/100 [==============================] - 10s 96ms/step - loss: 0.4150 - acc: 0.8025 - val_loss: 0.4219 - val_acc: 0.7972
Epoch 7/40
100/100 [==============================] - 9s 87ms/step - loss: 0.4074 - acc: 0.8141 - val_loss: 0.4140 - val_acc: 0.8056
Epoch 8/40
100/100 [==============================] - 8s 82ms/step - loss: 0.3844 - acc: 0.8191 - val_loss: 0.3641 - val_acc: 0.8387
Epoch 9/40
100/100 [==============================] - 9s 86ms/step - loss: 0.3758 - acc: 0.8284 - val_loss: 0.4062 - val_acc: 0.8206
Epoch 10/40
100/100 [==============================] - 8s 77ms/step - loss: 0.3736 - acc: 0.8317 - val_loss: 0.3683 - val_acc: 0.8350
Epoch 11/40
100/100 [==============================] - 8s 76ms/step - loss: 0.3344 - acc: 0.8500 - val_loss: 0.3155 - val_acc: 0.8686
Epoch 12/40
100/100 [==============================] - 8s 83ms/step - loss: 0.3334 - acc: 0.8500 - val_loss: 0.3273 - val_acc: 0.8600
Epoch 13/40
100/100 [==============================] - 10s 99ms/step - loss: 0.3481 - acc: 0.8491 - val_loss: 0.3251 - val_acc: 0.8669
Epoch 14/40
100/100 [==============================] - 7s 74ms/step - loss: 0.3323 - acc: 0.8538 - val_loss: 0.2833 - val_acc: 0.8781
Epoch 15/40
100/100 [==============================] - 7s 74ms/step - loss: 0.3018 - acc: 0.8756 - val_loss: 0.3167 - val_acc: 0.8662
Epoch 16/40
100/100 [==============================] - 9s 94ms/step - loss: 0.3005 - acc: 0.8728 - val_loss: 0.2962 - val_acc: 0.8736
Epoch 17/40
100/100 [==============================] - 10s 101ms/step - loss: 0.3181 - acc: 0.8650 - val_loss: 0.3029 - val_acc: 0.8538
Epoch 18/40
100/100 [==============================] - 9s 93ms/step - loss: 0.2881 - acc: 0.8728 - val_loss: 0.2990 - val_acc: 0.8700
Epoch 19/40
100/100 [==============================] - 8s 83ms/step - loss: 0.2796 - acc: 0.8846 - val_loss: 0.2831 - val_acc: 0.8838
Epoch 20/40
100/100 [==============================] - 9s 94ms/step - loss: 0.2698 - acc: 0.8803 - val_loss: 0.2925 - val_acc: 0.8769
Epoch 21/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2739 - acc: 0.8859 - val_loss: 0.2802 - val_acc: 0.8930
Epoch 22/40
100/100 [==============================] - 7s 74ms/step - loss: 0.2655 - acc: 0.8952 - val_loss: 0.2575 - val_acc: 0.8944
Epoch 23/40
100/100 [==============================] - 8s 77ms/step - loss: 0.2774 - acc: 0.8878 - val_loss: 0.2846 - val_acc: 0.8862
Epoch 24/40
100/100 [==============================] - 8s 75ms/step - loss: 0.2732 - acc: 0.8922 - val_loss: 0.2540 - val_acc: 0.8894oss: 0.2675 - ac
Epoch 25/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2545 - acc: 0.8981 - val_loss: 0.2754 - val_acc: 0.8938
Epoch 26/40
100/100 [==============================] - 7s 74ms/step - loss: 0.2684 - acc: 0.8917 - val_loss: 0.2594 - val_acc: 0.8849
Epoch 27/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2454 - acc: 0.8994 - val_loss: 0.2455 - val_acc: 0.9044
Epoch 28/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2347 - acc: 0.9125 - val_loss: 0.2343 - val_acc: 0.9062
Epoch 29/40
100/100 [==============================] - 8s 80ms/step - loss: 0.2444 - acc: 0.9019 - val_loss: 0.3505 - val_acc: 0.8456
Epoch 30/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2441 - acc: 0.8959 - val_loss: 0.2221 - val_acc: 0.9056
Epoch 31/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2674 - acc: 0.8928 - val_loss: 0.2690 - val_acc: 0.8899 - lo - ETA
Epoch 32/40
100/100 [==============================] - 7s 73ms/step - loss: 0.2451 - acc: 0.8994 - val_loss: 0.2260 - val_acc: 0.9094
Epoch 33/40
100/100 [==============================] - 8s 84ms/step - loss: 0.2271 - acc: 0.9150 - val_loss: 0.2349 - val_acc: 0.9038
Epoch 34/40
100/100 [==============================] - 8s 83ms/step - loss: 0.2475 - acc: 0.9025 - val_loss: 0.2556 - val_acc: 0.9000
Epoch 35/40
100/100 [==============================] - 8s 82ms/step - loss: 0.2217 - acc: 0.9124 - val_loss: 0.2172 - val_acc: 0.9206
Epoch 36/40
100/100 [==============================] - 9s 87ms/step - loss: 0.2201 - acc: 0.9097 - val_loss: 0.2924 - val_acc: 0.8686
Epoch 37/40
100/100 [==============================] - 8s 76ms/step - loss: 0.2164 - acc: 0.9189 - val_loss: 0.2037 - val_acc: 0.9156
Epoch 38/40
100/100 [==============================] - 7s 71ms/step - loss: 0.2357 - acc: 0.9034 - val_loss: 0.2229 - val_acc: 0.9100
Epoch 39/40
100/100 [==============================] - 8s 76ms/step - loss: 0.2328 - acc: 0.9066 - val_loss: 0.2328 - val_acc: 0.9006
Epoch 40/40
100/100 [==============================] - 8s 77ms/step - loss: 0.2246 - acc: 0.9113 - val_loss: 0.2386 - val_acc: 0.9050

test accuracy: 0.9128125
test loss: 0.22373965442180632
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_4 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 128)               1048704   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 129       
=================================================================
Total params: 1,053,921
Trainable params: 1,053,921
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 11s 107ms/step - loss: 0.6249 - acc: 0.6784 - val_loss: 0.5428 - val_acc: 0.7538
Epoch 2/40
100/100 [==============================] - 8s 82ms/step - loss: 0.5286 - acc: 0.7428 - val_loss: 0.5384 - val_acc: 0.7319
Epoch 3/40
100/100 [==============================] - 8s 82ms/step - loss: 0.4890 - acc: 0.7609 - val_loss: 0.4851 - val_acc: 0.7775
Epoch 4/40
100/100 [==============================] - 9s 94ms/step - loss: 0.4450 - acc: 0.7807 - val_loss: 0.4164 - val_acc: 0.8019
Epoch 5/40
100/100 [==============================] - 8s 83ms/step - loss: 0.4468 - acc: 0.7850 - val_loss: 0.4188 - val_acc: 0.7987
Epoch 6/40
100/100 [==============================] - 9s 85ms/step - loss: 0.4232 - acc: 0.7963 - val_loss: 0.3779 - val_acc: 0.8360
Epoch 7/40
100/100 [==============================] - 8s 81ms/step - loss: 0.4020 - acc: 0.8109 - val_loss: 0.3603 - val_acc: 0.8525
Epoch 8/40
100/100 [==============================] - 8s 83ms/step - loss: 0.3813 - acc: 0.8269 - val_loss: 0.3921 - val_acc: 0.8306
Epoch 9/40
100/100 [==============================] - 8s 80ms/step - loss: 0.3509 - acc: 0.8450 - val_loss: 0.3737 - val_acc: 0.8337
Epoch 10/40
100/100 [==============================] - 8s 83ms/step - loss: 0.3821 - acc: 0.8307 - val_loss: 0.3580 - val_acc: 0.8363
Epoch 11/40
100/100 [==============================] - 9s 85ms/step - loss: 0.3496 - acc: 0.8511 - val_loss: 0.3243 - val_acc: 0.8717
Epoch 12/40
100/100 [==============================] - 8s 81ms/step - loss: 0.3580 - acc: 0.8469 - val_loss: 0.3456 - val_acc: 0.8425
Epoch 13/40
100/100 [==============================] - 8s 82ms/step - loss: 0.3315 - acc: 0.8550 - val_loss: 0.3103 - val_acc: 0.8600
Epoch 14/40
100/100 [==============================] - 8s 80ms/step - loss: 0.3076 - acc: 0.8694 - val_loss: 0.3072 - val_acc: 0.8762
Epoch 15/40
100/100 [==============================] - 8s 80ms/step - loss: 0.3065 - acc: 0.8675 - val_loss: 0.2751 - val_acc: 0.8875
Epoch 16/40
100/100 [==============================] - 8s 79ms/step - loss: 0.3001 - acc: 0.8681 - val_loss: 0.2897 - val_acc: 0.8705
Epoch 17/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2951 - acc: 0.8694 - val_loss: 0.4087 - val_acc: 0.8250
Epoch 18/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2952 - acc: 0.8715 - val_loss: 0.2884 - val_acc: 0.8800
Epoch 19/40
100/100 [==============================] - 9s 87ms/step - loss: 0.2885 - acc: 0.8769 - val_loss: 0.2808 - val_acc: 0.8906
Epoch 20/40
100/100 [==============================] - 8s 83ms/step - loss: 0.3067 - acc: 0.8738 - val_loss: 0.2833 - val_acc: 0.8781
Epoch 21/40
100/100 [==============================] - 8s 79ms/step - loss: 0.2925 - acc: 0.8744 - val_loss: 0.2792 - val_acc: 0.8811
Epoch 22/40
100/100 [==============================] - 8s 80ms/step - loss: 0.2910 - acc: 0.8853 - val_loss: 0.3116 - val_acc: 0.8769
Epoch 23/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2467 - acc: 0.8991 - val_loss: 0.2579 - val_acc: 0.8950
Epoch 24/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2673 - acc: 0.8886 - val_loss: 0.2703 - val_acc: 0.8869
Epoch 25/40
100/100 [==============================] - 9s 87ms/step - loss: 0.2704 - acc: 0.8866 - val_loss: 0.2464 - val_acc: 0.8962
Epoch 26/40
100/100 [==============================] - 8s 80ms/step - loss: 0.2617 - acc: 0.8876 - val_loss: 0.2971 - val_acc: 0.8792
Epoch 27/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2564 - acc: 0.8969 - val_loss: 0.2513 - val_acc: 0.8950
Epoch 28/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2537 - acc: 0.8941 - val_loss: 0.3080 - val_acc: 0.8594
Epoch 29/40
100/100 [==============================] - 8s 80ms/step - loss: 0.2654 - acc: 0.8894 - val_loss: 0.3397 - val_acc: 0.8444
Epoch 30/40
100/100 [==============================] - 8s 85ms/step - loss: 0.2376 - acc: 0.9066 - val_loss: 0.2398 - val_acc: 0.9012
Epoch 31/40
100/100 [==============================] - 10s 102ms/step - loss: 0.2601 - acc: 0.8908 - val_loss: 0.2502 - val_acc: 0.8942
Epoch 32/40
100/100 [==============================] - 8s 77ms/step - loss: 0.2580 - acc: 0.8919 - val_loss: 0.2438 - val_acc: 0.9044
Epoch 33/40
100/100 [==============================] - 8s 82ms/step - loss: 0.2467 - acc: 0.8972 - val_loss: 0.2425 - val_acc: 0.9087
Epoch 34/40
100/100 [==============================] - 8s 76ms/step - loss: 0.2423 - acc: 0.9034 - val_loss: 0.2213 - val_acc: 0.9062 1s - los
Epoch 35/40
100/100 [==============================] - 8s 80ms/step - loss: 0.2337 - acc: 0.9103 - val_loss: 0.2366 - val_acc: 0.9094
Epoch 36/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2348 - acc: 0.9009 - val_loss: 0.2257 - val_acc: 0.9230
Epoch 37/40
100/100 [==============================] - 8s 79ms/step - loss: 0.2424 - acc: 0.9047 - val_loss: 0.2386 - val_acc: 0.9075
Epoch 38/40
100/100 [==============================] - 8s 81ms/step - loss: 0.2295 - acc: 0.9053 - val_loss: 0.2547 - val_acc: 0.9062
Epoch 39/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2272 - acc: 0.9104 - val_loss: 0.2517 - val_acc: 0.8931
Epoch 40/40
100/100 [==============================] - 8s 75ms/step - loss: 0.2450 - acc: 0.9016 - val_loss: 0.3324 - val_acc: 0.8600

test accuracy: 0.8709375
test loss: 0.3005991432070732
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_5 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 256)               2097408   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 257       
=================================================================
Total params: 2,102,753
Trainable params: 2,102,753
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 11s 107ms/step - loss: 0.6133 - acc: 0.6719 - val_loss: 0.5499 - val_acc: 0.7206
Epoch 2/40
100/100 [==============================] - 10s 98ms/step - loss: 0.5546 - acc: 0.7147 - val_loss: 0.4939 - val_acc: 0.7612
Epoch 3/40
100/100 [==============================] - 9s 93ms/step - loss: 0.5024 - acc: 0.7522 - val_loss: 0.4530 - val_acc: 0.7856
Epoch 4/40
100/100 [==============================] - 9s 93ms/step - loss: 0.4510 - acc: 0.7876 - val_loss: 0.4384 - val_acc: 0.7756
Epoch 5/40
100/100 [==============================] - 11s 115ms/step - loss: 0.4362 - acc: 0.7894 - val_loss: 0.4090 - val_acc: 0.8125
Epoch 6/40
100/100 [==============================] - 10s 103ms/step - loss: 0.4237 - acc: 0.7934 - val_loss: 0.4237 - val_acc: 0.8041
Epoch 7/40
100/100 [==============================] - 10s 99ms/step - loss: 0.3999 - acc: 0.8116 - val_loss: 0.3937 - val_acc: 0.8237
Epoch 8/40
100/100 [==============================] - 10s 98ms/step - loss: 0.3990 - acc: 0.8197 - val_loss: 0.3483 - val_acc: 0.8562
Epoch 9/40
100/100 [==============================] - 9s 91ms/step - loss: 0.3783 - acc: 0.8253 - val_loss: 0.3408 - val_acc: 0.8556
Epoch 10/40
100/100 [==============================] - 9s 93ms/step - loss: 0.3614 - acc: 0.8366 - val_loss: 0.3669 - val_acc: 0.8406
Epoch 11/40
100/100 [==============================] - 9s 94ms/step - loss: 0.3552 - acc: 0.8394 - val_loss: 0.3771 - val_acc: 0.8392
Epoch 12/40
100/100 [==============================] - 9s 92ms/step - loss: 0.3383 - acc: 0.8531 - val_loss: 0.3477 - val_acc: 0.8531
Epoch 13/40
100/100 [==============================] - 9s 94ms/step - loss: 0.3381 - acc: 0.8525 - val_loss: 0.2886 - val_acc: 0.8862
Epoch 14/40
100/100 [==============================] - 9s 92ms/step - loss: 0.3265 - acc: 0.8541 - val_loss: 0.3233 - val_acc: 0.8638
Epoch 15/40
100/100 [==============================] - 9s 94ms/step - loss: 0.3143 - acc: 0.8612 - val_loss: 0.3104 - val_acc: 0.8662
Epoch 16/40
100/100 [==============================] - 9s 94ms/step - loss: 0.3052 - acc: 0.8647 - val_loss: 0.2762 - val_acc: 0.8849
Epoch 17/40
100/100 [==============================] - 10s 95ms/step - loss: 0.2793 - acc: 0.8846 - val_loss: 0.2674 - val_acc: 0.8994
Epoch 18/40
100/100 [==============================] - 9s 92ms/step - loss: 0.2976 - acc: 0.8719 - val_loss: 0.2845 - val_acc: 0.8850
Epoch 19/40
100/100 [==============================] - 9s 93ms/step - loss: 0.2811 - acc: 0.8791 - val_loss: 0.2537 - val_acc: 0.8981
Epoch 20/40
100/100 [==============================] - 10s 103ms/step - loss: 0.2862 - acc: 0.8806 - val_loss: 0.2582 - val_acc: 0.8938
Epoch 21/40
100/100 [==============================] - 10s 104ms/step - loss: 0.2760 - acc: 0.8834 - val_loss: 0.2468 - val_acc: 0.9111
Epoch 22/40
100/100 [==============================] - 10s 97ms/step - loss: 0.2662 - acc: 0.8891 - val_loss: 0.2722 - val_acc: 0.8919
Epoch 23/40
100/100 [==============================] - 10s 99ms/step - loss: 0.2554 - acc: 0.9000 - val_loss: 0.2477 - val_acc: 0.8944
Epoch 24/40
100/100 [==============================] - 11s 105ms/step - loss: 0.2615 - acc: 0.8906 - val_loss: 0.2409 - val_acc: 0.9106
Epoch 25/40
100/100 [==============================] - 10s 101ms/step - loss: 0.2542 - acc: 0.8930 - val_loss: 0.2472 - val_acc: 0.8981
Epoch 26/40
100/100 [==============================] - 9s 94ms/step - loss: 0.2469 - acc: 0.9012 - val_loss: 0.2303 - val_acc: 0.9111
Epoch 27/40
100/100 [==============================] - 9s 95ms/step - loss: 0.2637 - acc: 0.8919 - val_loss: 0.2710 - val_acc: 0.8838
Epoch 28/40
100/100 [==============================] - 9s 94ms/step - loss: 0.2415 - acc: 0.9000 - val_loss: 0.2297 - val_acc: 0.9094
Epoch 29/40
100/100 [==============================] - 9s 91ms/step - loss: 0.2556 - acc: 0.8953 - val_loss: 0.2429 - val_acc: 0.9006
Epoch 30/40
100/100 [==============================] - 9s 95ms/step - loss: 0.2350 - acc: 0.9071 - val_loss: 0.2243 - val_acc: 0.9137
Epoch 31/40
100/100 [==============================] - 9s 94ms/step - loss: 0.2350 - acc: 0.9109 - val_loss: 0.2294 - val_acc: 0.9193
Epoch 32/40
100/100 [==============================] - 10s 95ms/step - loss: 0.2372 - acc: 0.9031 - val_loss: 0.2383 - val_acc: 0.9012
Epoch 33/40
100/100 [==============================] - 10s 102ms/step - loss: 0.2348 - acc: 0.9113 - val_loss: 0.2935 - val_acc: 0.8850
Epoch 34/40
100/100 [==============================] - 10s 96ms/step - loss: 0.2318 - acc: 0.9041 - val_loss: 0.2498 - val_acc: 0.8981
Epoch 35/40
100/100 [==============================] - 11s 105ms/step - loss: 0.2255 - acc: 0.9091 - val_loss: 0.2203 - val_acc: 0.9137
Epoch 36/40
100/100 [==============================] - 10s 96ms/step - loss: 0.2413 - acc: 0.9009 - val_loss: 0.2384 - val_acc: 0.9043
Epoch 37/40
100/100 [==============================] - 10s 98ms/step - loss: 0.2302 - acc: 0.9092 - val_loss: 0.2049 - val_acc: 0.9263
Epoch 38/40
100/100 [==============================] - 10s 102ms/step - loss: 0.2248 - acc: 0.9094 - val_loss: 0.2728 - val_acc: 0.8956
Epoch 39/40
100/100 [==============================] - 9s 91ms/step - loss: 0.2365 - acc: 0.9041 - val_loss: 0.2322 - val_acc: 0.9062
Epoch 40/40
100/100 [==============================] - 9s 91ms/step - loss: 0.2116 - acc: 0.9144 - val_loss: 0.2362 - val_acc: 0.9081

test accuracy: 0.91375
test loss: 0.21697479251772164
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_6 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 512)               4194816   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 513       
=================================================================
Total params: 4,200,417
Trainable params: 4,200,417
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 14s 141ms/step - loss: 0.6363 - acc: 0.6648 - val_loss: 0.5625 - val_acc: 0.7400
Epoch 2/40
100/100 [==============================] - 12s 119ms/step - loss: 0.5368 - acc: 0.7375 - val_loss: 0.5271 - val_acc: 0.7412
Epoch 3/40
100/100 [==============================] - 8s 84ms/step - loss: 0.5082 - acc: 0.7488 - val_loss: 0.4561 - val_acc: 0.7712
Epoch 4/40
100/100 [==============================] - 8s 80ms/step - loss: 0.4531 - acc: 0.7778 - val_loss: 0.5553 - val_acc: 0.7163
Epoch 5/40
100/100 [==============================] - 8s 79ms/step - loss: 0.4432 - acc: 0.7856 - val_loss: 0.4216 - val_acc: 0.7900
Epoch 6/40
100/100 [==============================] - 9s 85ms/step - loss: 0.4120 - acc: 0.8125 - val_loss: 0.3876 - val_acc: 0.8242
Epoch 7/40
100/100 [==============================] - 9s 87ms/step - loss: 0.4025 - acc: 0.8128 - val_loss: 0.3680 - val_acc: 0.8369
Epoch 8/40
100/100 [==============================] - 8s 79ms/step - loss: 0.3864 - acc: 0.8200 - val_loss: 0.3480 - val_acc: 0.8400
Epoch 9/40
100/100 [==============================] - 8s 80ms/step - loss: 0.3616 - acc: 0.8318 - val_loss: 0.3378 - val_acc: 0.8512
Epoch 10/40
100/100 [==============================] - 8s 80ms/step - loss: 0.3539 - acc: 0.8397 - val_loss: 0.3576 - val_acc: 0.8337
Epoch 11/40
100/100 [==============================] - 8s 82ms/step - loss: 0.3486 - acc: 0.8431 - val_loss: 0.3512 - val_acc: 0.8329
Epoch 12/40
100/100 [==============================] - 8s 77ms/step - loss: 0.3525 - acc: 0.8428 - val_loss: 0.3122 - val_acc: 0.8694
Epoch 13/40
100/100 [==============================] - 8s 78ms/step - loss: 0.3410 - acc: 0.8453 - val_loss: 0.3795 - val_acc: 0.8175
Epoch 14/40
100/100 [==============================] - 8s 77ms/step - loss: 0.3191 - acc: 0.8603 - val_loss: 0.2922 - val_acc: 0.8694
Epoch 15/40
100/100 [==============================] - 8s 78ms/step - loss: 0.3256 - acc: 0.8631 - val_loss: 0.3124 - val_acc: 0.8694
Epoch 16/40
100/100 [==============================] - 8s 80ms/step - loss: 0.3137 - acc: 0.8623 - val_loss: 0.3949 - val_acc: 0.8091
Epoch 17/40
100/100 [==============================] - 8s 78ms/step - loss: 0.3083 - acc: 0.8666 - val_loss: 0.2741 - val_acc: 0.8856
Epoch 18/40
100/100 [==============================] - 8s 77ms/step - loss: 0.3003 - acc: 0.8697 - val_loss: 0.3084 - val_acc: 0.8644
Epoch 19/40
100/100 [==============================] - 8s 77ms/step - loss: 0.2988 - acc: 0.8787 - val_loss: 0.3157 - val_acc: 0.8562
Epoch 20/40
100/100 [==============================] - 8s 79ms/step - loss: 0.2970 - acc: 0.8753 - val_loss: 0.2728 - val_acc: 0.8831
Epoch 21/40
100/100 [==============================] - 8s 79ms/step - loss: 0.2652 - acc: 0.8861 - val_loss: 0.2690 - val_acc: 0.8886
Epoch 22/40
100/100 [==============================] - 9s 87ms/step - loss: 0.2823 - acc: 0.8812 - val_loss: 0.2745 - val_acc: 0.8856
Epoch 23/40
100/100 [==============================] - 9s 86ms/step - loss: 0.2815 - acc: 0.8778 - val_loss: 0.2850 - val_acc: 0.8906
Epoch 24/40
100/100 [==============================] - 9s 86ms/step - loss: 0.2869 - acc: 0.8806 - val_loss: 0.2643 - val_acc: 0.8956
Epoch 25/40
100/100 [==============================] - 9s 92ms/step - loss: 0.2638 - acc: 0.8978 - val_loss: 0.2492 - val_acc: 0.9019
Epoch 26/40
100/100 [==============================] - 9s 89ms/step - loss: 0.2814 - acc: 0.8858 - val_loss: 0.2492 - val_acc: 0.9024
Epoch 27/40
100/100 [==============================] - 9s 88ms/step - loss: 0.2564 - acc: 0.8934 - val_loss: 0.2877 - val_acc: 0.8700
Epoch 28/40
100/100 [==============================] - 9s 91ms/step - loss: 0.2485 - acc: 0.8975 - val_loss: 0.2400 - val_acc: 0.9062
Epoch 29/40
100/100 [==============================] - 9s 92ms/step - loss: 0.2658 - acc: 0.8875 - val_loss: 0.2723 - val_acc: 0.8900
Epoch 30/40
100/100 [==============================] - 9s 86ms/step - loss: 0.2500 - acc: 0.8978 - val_loss: 0.2338 - val_acc: 0.9062
Epoch 31/40
100/100 [==============================] - 9s 92ms/step - loss: 0.2594 - acc: 0.8934 - val_loss: 0.2282 - val_acc: 0.9124
Epoch 32/40
100/100 [==============================] - 9s 89ms/step - loss: 0.2472 - acc: 0.8983 - val_loss: 0.2497 - val_acc: 0.9038
Epoch 33/40
100/100 [==============================] - 9s 88ms/step - loss: 0.2569 - acc: 0.8984 - val_loss: 0.2315 - val_acc: 0.9075
Epoch 34/40
100/100 [==============================] - 9s 90ms/step - loss: 0.2437 - acc: 0.8984 - val_loss: 0.2436 - val_acc: 0.9056
Epoch 35/40
100/100 [==============================] - 9s 89ms/step - loss: 0.2466 - acc: 0.8959 - val_loss: 0.2436 - val_acc: 0.9113
Epoch 36/40
100/100 [==============================] - 9s 91ms/step - loss: 0.2374 - acc: 0.9075 - val_loss: 0.2254 - val_acc: 0.9068
Epoch 37/40
100/100 [==============================] - 9s 89ms/step - loss: 0.2461 - acc: 0.8984 - val_loss: 0.2512 - val_acc: 0.8931
Epoch 38/40
100/100 [==============================] - 10s 96ms/step - loss: 0.2219 - acc: 0.9081 - val_loss: 0.2529 - val_acc: 0.8931
Epoch 39/40
100/100 [==============================] - 9s 91ms/step - loss: 0.2439 - acc: 0.8984 - val_loss: 0.2827 - val_acc: 0.8938
Epoch 40/40
100/100 [==============================] - 9s 89ms/step - loss: 0.2319 - acc: 0.9055 - val_loss: 0.2426 - val_acc: 0.9056

test accuracy: 0.904375
test loss: 0.23282352328300476
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_7 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 1024)              8389632   
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 1025      
=================================================================
Total params: 8,395,745
Trainable params: 8,395,745
Non-trainable params: 0
_________________________________________________________________
Epoch 1/40
100/100 [==============================] - 14s 136ms/step - loss: 0.6493 - acc: 0.6734 - val_loss: 0.5796 - val_acc: 0.7369
Epoch 2/40
100/100 [==============================] - 13s 133ms/step - loss: 0.5465 - acc: 0.7381 - val_loss: 0.4899 - val_acc: 0.7656
Epoch 3/40
100/100 [==============================] - 13s 127ms/step - loss: 0.4763 - acc: 0.7772 - val_loss: 0.4486 - val_acc: 0.8000
Epoch 4/40
100/100 [==============================] - 13s 128ms/step - loss: 0.4648 - acc: 0.7774 - val_loss: 0.4953 - val_acc: 0.7675
Epoch 5/40
100/100 [==============================] - 13s 126ms/step - loss: 0.4563 - acc: 0.7762 - val_loss: 0.4054 - val_acc: 0.8200
Epoch 6/40
100/100 [==============================] - 13s 131ms/step - loss: 0.4049 - acc: 0.8119 - val_loss: 0.4347 - val_acc: 0.7929
Epoch 7/40
100/100 [==============================] - 13s 127ms/step - loss: 0.4147 - acc: 0.8088 - val_loss: 0.4124 - val_acc: 0.8013
Epoch 8/40
100/100 [==============================] - 13s 130ms/step - loss: 0.4055 - acc: 0.8103 - val_loss: 0.3708 - val_acc: 0.8237
Epoch 9/40
100/100 [==============================] - 13s 128ms/step - loss: 0.3647 - acc: 0.8359 - val_loss: 0.3572 - val_acc: 0.8387
Epoch 10/40
100/100 [==============================] - 13s 128ms/step - loss: 0.3729 - acc: 0.8231 - val_loss: 0.3433 - val_acc: 0.8456
Epoch 11/40
100/100 [==============================] - 13s 133ms/step - loss: 0.3444 - acc: 0.8472 - val_loss: 0.3202 - val_acc: 0.8630
Epoch 12/40
100/100 [==============================] - 13s 129ms/step - loss: 0.3525 - acc: 0.8413 - val_loss: 0.3442 - val_acc: 0.8581
Epoch 13/40
100/100 [==============================] - 13s 131ms/step - loss: 0.3340 - acc: 0.8506 - val_loss: 0.3241 - val_acc: 0.8581
Epoch 14/40
100/100 [==============================] - 13s 131ms/step - loss: 0.3376 - acc: 0.8538 - val_loss: 0.3270 - val_acc: 0.8625
Epoch 15/40
100/100 [==============================] - 13s 129ms/step - loss: 0.3115 - acc: 0.8672 - val_loss: 0.2610 - val_acc: 0.8969
Epoch 16/40
100/100 [==============================] - 13s 132ms/step - loss: 0.3216 - acc: 0.8575 - val_loss: 0.2797 - val_acc: 0.8861
Epoch 17/40
100/100 [==============================] - 13s 134ms/step - loss: 0.3026 - acc: 0.8650 - val_loss: 0.2761 - val_acc: 0.8800
Epoch 18/40
100/100 [==============================] - 13s 130ms/step - loss: 0.2904 - acc: 0.8766 - val_loss: 0.3034 - val_acc: 0.8619
Epoch 19/40
100/100 [==============================] - 14s 141ms/step - loss: 0.3012 - acc: 0.8722 - val_loss: 0.2736 - val_acc: 0.8844
Epoch 20/40
100/100 [==============================] - 14s 136ms/step - loss: 0.2878 - acc: 0.8825 - val_loss: 0.3003 - val_acc: 0.8669
Epoch 21/40
100/100 [==============================] - 13s 130ms/step - loss: 0.2738 - acc: 0.8850 - val_loss: 0.2531 - val_acc: 0.9111
Epoch 22/40
100/100 [==============================] - 13s 135ms/step - loss: 0.2668 - acc: 0.8877 - val_loss: 0.2607 - val_acc: 0.8925
Epoch 23/40
100/100 [==============================] - 14s 143ms/step - loss: 0.2615 - acc: 0.8906 - val_loss: 0.2494 - val_acc: 0.9006
Epoch 24/40
100/100 [==============================] - 13s 131ms/step - loss: 0.2878 - acc: 0.8750 - val_loss: 0.2524 - val_acc: 0.9025
Epoch 25/40
100/100 [==============================] - 13s 131ms/step - loss: 0.2516 - acc: 0.9009 - val_loss: 0.2448 - val_acc: 0.9100
Epoch 26/40
100/100 [==============================] - 13s 131ms/step - loss: 0.2950 - acc: 0.8725 - val_loss: 0.2613 - val_acc: 0.8924
Epoch 27/40
100/100 [==============================] - 13s 132ms/step - loss: 0.2538 - acc: 0.9025 - val_loss: 0.2295 - val_acc: 0.9044
Epoch 28/40
100/100 [==============================] - 13s 129ms/step - loss: 0.2411 - acc: 0.9022 - val_loss: 0.2711 - val_acc: 0.8900
Epoch 29/40
100/100 [==============================] - 13s 132ms/step - loss: 0.2656 - acc: 0.8883 - val_loss: 0.2454 - val_acc: 0.8969
Epoch 30/40
100/100 [==============================] - 13s 132ms/step - loss: 0.2692 - acc: 0.8878 - val_loss: 0.2450 - val_acc: 0.9100
Epoch 31/40
100/100 [==============================] - 13s 131ms/step - loss: 0.2784 - acc: 0.8866 - val_loss: 0.2568 - val_acc: 0.9024
Epoch 32/40
100/100 [==============================] - 13s 133ms/step - loss: 0.2304 - acc: 0.9066 - val_loss: 0.2299 - val_acc: 0.8994
Epoch 33/40
100/100 [==============================] - 13s 134ms/step - loss: 0.2444 - acc: 0.8953 - val_loss: 0.2270 - val_acc: 0.9156
Epoch 34/40
100/100 [==============================] - 14s 140ms/step - loss: 0.2533 - acc: 0.9016 - val_loss: 0.2465 - val_acc: 0.9031
Epoch 35/40
100/100 [==============================] - 15s 146ms/step - loss: 0.2459 - acc: 0.9009 - val_loss: 0.2688 - val_acc: 0.8938
Epoch 36/40
100/100 [==============================] - 13s 134ms/step - loss: 0.2285 - acc: 0.9028 - val_loss: 0.3395 - val_acc: 0.8579
Epoch 37/40
100/100 [==============================] - 13s 132ms/step - loss: 0.2352 - acc: 0.9012 - val_loss: 0.2719 - val_acc: 0.8925
Epoch 38/40
100/100 [==============================] - 13s 131ms/step - loss: 0.2246 - acc: 0.9078 - val_loss: 0.2520 - val_acc: 0.9038
Epoch 39/40
100/100 [==============================] - 13s 130ms/step - loss: 0.2298 - acc: 0.9094 - val_loss: 0.2374 - val_acc: 0.9012
Epoch 40/40
100/100 [==============================] - 13s 131ms/step - loss: 0.2379 - acc: 0.9047 - val_loss: 0.2141 - val_acc: 0.9200

test accuracy: 0.9071875
test loss: 0.2265555003657937

1.4 The Interpretation of Results for Architecture Choice

(1) The number of convolution-subsambling pairs: Based on the accuracy and loss of training, validation, and test sets, we found that the optimal number of convolution-subsambling pairs is 2.
(2) The number of feature maps: It appears that 16 maps in the first convolutional layer and 32 maps in the second convolutional layer is the optimal choice. Increasing the number of feature maps only improves the results slightly better, which is not worth the additional computational cost.
(3) The number of units for the dense layers: It appears that 16 units for the dense layers is the optimal choice. Increasing the number of feature maps only improves the results slightly better, which is not worth the additional computational cost.

2. Experimenting with Different Optimizers/Dropout/Regularization/etc

2.1 The hypothesis/strategy Ststement of Experiments

In this part, I will try different optimizers, dropout, and regularization to construct an optimal CNN. The optimizers I will try include SGD, Adagrad, Adadelta, RMSprop, and Adam. Dropout with different rates (i.e., the fraction of the input units to drop) will be tried to reduce the overfitting as much as possible. And different regularizers will be used to apply penalties on layer parameters.

2.2 The Types of Tests for Experiments

Here, the accuracy and loss of training, validation, and test sets obtained from different architectures will be used to choose the optimal architecture. The final values of accuracy and loss and how the accuracy and loss change with epochs will be compared too determine the optimal values.

2.3 The Code and Results for Experiments

Here, we set the number of convolution-subsambling pairs as 2, use 16 maps in the first convolutional layer and 32 maps in the second convolutional layer, and set the number of units for the dense layers as 16. Based on the observations, we find that using 20 epochs is enough to obtain an optimal result, therefore we will use 20 epochs in the following experiments.

2.3.1 Experimenting with Different Optimizers

Five different optimizers, including SGD, Adagrad, Adadelta, RMSprop, and Adam, are tried in this part.

In [23]:
def CNN_Optimizer_Selector (activation_function_1, activation_function_2):
    
    sgd = optimizers.SGD(lr = 0.05, decay=1e-5, momentum=0.9, nesterov=True)
    RMSprop = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
    Adagrad = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
    Adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    Adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    
    optimizer_type = ['sgd', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam']
    
    K.clear_session()
    model = [0] * 5
    
    for j in range(0, 5):
        
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        model[j].add(layers.Flatten())

        model[j].add(layers.Dense(16, kernel_initializer='glorot_uniform', activation=activation_function_1, name='fc1'))
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, name='fc2'))
        model[j].compile(loss='binary_crossentropy', optimizer = optimizer_type[j], metrics=['accuracy'])
    
        print('\ntest optimizer:', optimizer_type[j])
    
        model[j].summary()
        
        # Fit the model
        history = model[j].fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=20,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [22]:
CNN_Optimizer_Selector('relu', 'sigmoid')
test Optimizer: sgd
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 16)                131088    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 17        
=================================================================
Total params: 136,193
Trainable params: 136,193
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
100/100 [==============================] - 9s 93ms/step - loss: 0.6619 - acc: 0.6416 - val_loss: 0.6148 - val_acc: 0.7400
Epoch 2/20
100/100 [==============================] - 8s 80ms/step - loss: 0.5575 - acc: 0.7320 - val_loss: 0.5247 - val_acc: 0.7538
Epoch 3/20
100/100 [==============================] - 7s 72ms/step - loss: 0.4979 - acc: 0.7700 - val_loss: 0.4789 - val_acc: 0.7662
Epoch 4/20
100/100 [==============================] - 8s 80ms/step - loss: 0.4754 - acc: 0.7759 - val_loss: 0.4464 - val_acc: 0.80690.4905 -  - ETA: 2s -
Epoch 5/20
100/100 [==============================] - 7s 72ms/step - loss: 0.4269 - acc: 0.8059 - val_loss: 0.4009 - val_acc: 0.8144
Epoch 6/20
100/100 [==============================] - 8s 79ms/step - loss: 0.4204 - acc: 0.8034 - val_loss: 0.4203 - val_acc: 0.8048
Epoch 7/20
100/100 [==============================] - 7s 71ms/step - loss: 0.3967 - acc: 0.8219 - val_loss: 0.3608 - val_acc: 0.8406
Epoch 8/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3929 - acc: 0.8220 - val_loss: 0.3659 - val_acc: 0.8375
Epoch 9/20
100/100 [==============================] - 7s 67ms/step - loss: 0.3586 - acc: 0.8372 - val_loss: 0.3562 - val_acc: 0.8462
Epoch 10/20
100/100 [==============================] - 8s 75ms/step - loss: 0.3559 - acc: 0.8406 - val_loss: 0.3236 - val_acc: 0.8625
Epoch 11/20
100/100 [==============================] - 8s 84ms/step - loss: 0.3363 - acc: 0.8512 - val_loss: 0.3285 - val_acc: 0.8542
Epoch 12/20
100/100 [==============================] - 9s 94ms/step - loss: 0.3322 - acc: 0.8566 - val_loss: 0.3202 - val_acc: 0.8544
Epoch 13/20
100/100 [==============================] - 8s 78ms/step - loss: 0.3316 - acc: 0.8512 - val_loss: 0.3093 - val_acc: 0.8619
Epoch 14/20
100/100 [==============================] - 8s 84ms/step - loss: 0.3150 - acc: 0.8641 - val_loss: 0.2899 - val_acc: 0.8881
Epoch 15/20
100/100 [==============================] - 8s 76ms/step - loss: 0.3302 - acc: 0.8550 - val_loss: 0.2931 - val_acc: 0.8750
Epoch 16/20
100/100 [==============================] - 8s 84ms/step - loss: 0.3046 - acc: 0.8759 - val_loss: 0.3072 - val_acc: 0.8648
Epoch 17/20
100/100 [==============================] - 9s 87ms/step - loss: 0.2893 - acc: 0.8713 - val_loss: 0.3646 - val_acc: 0.8419
Epoch 18/20
100/100 [==============================] - 9s 86ms/step - loss: 0.2888 - acc: 0.8797 - val_loss: 0.4966 - val_acc: 0.7712
Epoch 19/20
100/100 [==============================] - 8s 77ms/step - loss: 0.2856 - acc: 0.8692 - val_loss: 0.2576 - val_acc: 0.8975
Epoch 20/20
100/100 [==============================] - 8s 82ms/step - loss: 0.2942 - acc: 0.8738 - val_loss: 0.2726 - val_acc: 0.8875

test accuracy: 0.8971875
test loss: 0.25826115645468234
test Optimizer: RMSprop
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 16)                131088    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 17        
=================================================================
Total params: 136,193
Trainable params: 136,193
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
100/100 [==============================] - 9s 90ms/step - loss: 0.4814 - acc: 0.7691 - val_loss: 0.3329 - val_acc: 0.8569
Epoch 2/20
100/100 [==============================] - 7s 68ms/step - loss: 0.3704 - acc: 0.8409 - val_loss: 0.3016 - val_acc: 0.8806
Epoch 3/20
100/100 [==============================] - 7s 70ms/step - loss: 0.3245 - acc: 0.8603 - val_loss: 0.2789 - val_acc: 0.8869
Epoch 4/20
100/100 [==============================] - 7s 67ms/step - loss: 0.2982 - acc: 0.8738 - val_loss: 0.2980 - val_acc: 0.8681
Epoch 5/20
100/100 [==============================] - 7s 68ms/step - loss: 0.2957 - acc: 0.8762 - val_loss: 0.3085 - val_acc: 0.8781
Epoch 6/20
100/100 [==============================] - 7s 72ms/step - loss: 0.2671 - acc: 0.8930 - val_loss: 0.2573 - val_acc: 0.9061
Epoch 7/20
100/100 [==============================] - 7s 67ms/step - loss: 0.2515 - acc: 0.8978 - val_loss: 0.2263 - val_acc: 0.9038
Epoch 8/20
100/100 [==============================] - 7s 69ms/step - loss: 0.2428 - acc: 0.8988 - val_loss: 0.2287 - val_acc: 0.9038
Epoch 9/20
100/100 [==============================] - 7s 67ms/step - loss: 0.2675 - acc: 0.8900 - val_loss: 0.3251 - val_acc: 0.8706
Epoch 10/20
100/100 [==============================] - 7s 69ms/step - loss: 0.2285 - acc: 0.9122 - val_loss: 0.2292 - val_acc: 0.9100
Epoch 11/20
100/100 [==============================] - 8s 78ms/step - loss: 0.2273 - acc: 0.9100 - val_loss: 0.2060 - val_acc: 0.9155
Epoch 12/20
100/100 [==============================] - 8s 77ms/step - loss: 0.2153 - acc: 0.9083 - val_loss: 0.2572 - val_acc: 0.9087
Epoch 13/20
100/100 [==============================] - 7s 69ms/step - loss: 0.2287 - acc: 0.9056 - val_loss: 0.2362 - val_acc: 0.9000
Epoch 14/20
100/100 [==============================] - 8s 76ms/step - loss: 0.2090 - acc: 0.9200 - val_loss: 0.2300 - val_acc: 0.9062
Epoch 15/20
100/100 [==============================] - 8s 77ms/step - loss: 0.2168 - acc: 0.9131 - val_loss: 0.2139 - val_acc: 0.9200
Epoch 16/20
100/100 [==============================] - 8s 79ms/step - loss: 0.2242 - acc: 0.9141 - val_loss: 0.2060 - val_acc: 0.9212
Epoch 17/20
100/100 [==============================] - 7s 74ms/step - loss: 0.1914 - acc: 0.9200 - val_loss: 0.1967 - val_acc: 0.9256 - ETA: 0s - loss: 0.1905 - acc
Epoch 18/20
100/100 [==============================] - 7s 70ms/step - loss: 0.1971 - acc: 0.9206 - val_loss: 0.2351 - val_acc: 0.9087
Epoch 19/20
100/100 [==============================] - 7s 72ms/step - loss: 0.1834 - acc: 0.9246 - val_loss: 0.2196 - val_acc: 0.9125
Epoch 20/20
100/100 [==============================] - 7s 72ms/step - loss: 0.2023 - acc: 0.9188 - val_loss: 0.2490 - val_acc: 0.9050

test accuracy: 0.9046875
test loss: 0.2385796570777893
test Optimizer: Adagrad
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_3 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 16)                131088    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 17        
=================================================================
Total params: 136,193
Trainable params: 136,193
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
100/100 [==============================] - 9s 89ms/step - loss: 0.5432 - acc: 0.7602 - val_loss: 0.3820 - val_acc: 0.8294
Epoch 2/20
100/100 [==============================] - 8s 81ms/step - loss: 0.3482 - acc: 0.8466 - val_loss: 0.3382 - val_acc: 0.8538
Epoch 3/20
100/100 [==============================] - 7s 74ms/step - loss: 0.3105 - acc: 0.8634 - val_loss: 0.2927 - val_acc: 0.8712
Epoch 4/20
100/100 [==============================] - 8s 80ms/step - loss: 0.2856 - acc: 0.8809 - val_loss: 0.2635 - val_acc: 0.8938
Epoch 5/20
100/100 [==============================] - 7s 75ms/step - loss: 0.2748 - acc: 0.8881 - val_loss: 0.2491 - val_acc: 0.9019
Epoch 6/20
100/100 [==============================] - 8s 78ms/step - loss: 0.2629 - acc: 0.8916 - val_loss: 0.2493 - val_acc: 0.8961
Epoch 7/20
100/100 [==============================] - 7s 71ms/step - loss: 0.2465 - acc: 0.8994 - val_loss: 0.2269 - val_acc: 0.9163
Epoch 8/20
100/100 [==============================] - 8s 76ms/step - loss: 0.2509 - acc: 0.8999 - val_loss: 0.2558 - val_acc: 0.9000
Epoch 9/20
100/100 [==============================] - 7s 70ms/step - loss: 0.2168 - acc: 0.9147 - val_loss: 0.2442 - val_acc: 0.9006
Epoch 10/20
100/100 [==============================] - 8s 80ms/step - loss: 0.2226 - acc: 0.9087 - val_loss: 0.2284 - val_acc: 0.9119
Epoch 11/20
100/100 [==============================] - 7s 75ms/step - loss: 0.2126 - acc: 0.9097 - val_loss: 0.2257 - val_acc: 0.9161
Epoch 12/20
100/100 [==============================] - 8s 81ms/step - loss: 0.2235 - acc: 0.9116 - val_loss: 0.2635 - val_acc: 0.8881
Epoch 13/20
100/100 [==============================] - 7s 72ms/step - loss: 0.2029 - acc: 0.9222 - val_loss: 0.2509 - val_acc: 0.8975.
Epoch 14/20
100/100 [==============================] - 7s 72ms/step - loss: 0.2204 - acc: 0.9191 - val_loss: 0.2128 - val_acc: 0.9169
Epoch 15/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2042 - acc: 0.9166 - val_loss: 0.2250 - val_acc: 0.9156
Epoch 16/20
100/100 [==============================] - 7s 69ms/step - loss: 0.2278 - acc: 0.9086 - val_loss: 0.2341 - val_acc: 0.9130
Epoch 17/20
100/100 [==============================] - 7s 68ms/step - loss: 0.1961 - acc: 0.9200 - val_loss: 0.2299 - val_acc: 0.9175
Epoch 18/20
100/100 [==============================] - 7s 67ms/step - loss: 0.1837 - acc: 0.9256 - val_loss: 0.2162 - val_acc: 0.9163
Epoch 19/20
100/100 [==============================] - 7s 68ms/step - loss: 0.1911 - acc: 0.9234 - val_loss: 0.2189 - val_acc: 0.9206
Epoch 20/20
100/100 [==============================] - 7s 65ms/step - loss: 0.1966 - acc: 0.9241 - val_loss: 0.2209 - val_acc: 0.9175

test accuracy: 0.9140625
test loss: 0.21626812890172004
test Optimizer: Adadelta
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_4 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 16)                131088    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 17        
=================================================================
Total params: 136,193
Trainable params: 136,193
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
100/100 [==============================] - 8s 83ms/step - loss: 0.6032 - acc: 0.6822 - val_loss: 0.4389 - val_acc: 0.8050
Epoch 2/20
100/100 [==============================] - 7s 67ms/step - loss: 0.4131 - acc: 0.8059 - val_loss: 0.3381 - val_acc: 0.8600
Epoch 3/20
100/100 [==============================] - 7s 68ms/step - loss: 0.3497 - acc: 0.8478 - val_loss: 0.3369 - val_acc: 0.8588
Epoch 4/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3411 - acc: 0.8503 - val_loss: 0.3437 - val_acc: 0.8569
Epoch 5/20
100/100 [==============================] - 7s 70ms/step - loss: 0.3154 - acc: 0.8711 - val_loss: 0.2737 - val_acc: 0.88383s - loss: 0.31 - ETA: 1s -
Epoch 6/20
100/100 [==============================] - 7s 71ms/step - loss: 0.2856 - acc: 0.8759 - val_loss: 0.2447 - val_acc: 0.9043
Epoch 7/20
100/100 [==============================] - 7s 72ms/step - loss: 0.2636 - acc: 0.8925 - val_loss: 0.2809 - val_acc: 0.8869
Epoch 8/20
100/100 [==============================] - 7s 69ms/step - loss: 0.2490 - acc: 0.8988 - val_loss: 0.2324 - val_acc: 0.9087
Epoch 9/20
100/100 [==============================] - 7s 70ms/step - loss: 0.2644 - acc: 0.8912 - val_loss: 0.3114 - val_acc: 0.8638
Epoch 10/20
100/100 [==============================] - 8s 79ms/step - loss: 0.2448 - acc: 0.9036 - val_loss: 0.4399 - val_acc: 0.8075
Epoch 11/20
100/100 [==============================] - 8s 84ms/step - loss: 0.2374 - acc: 0.8991 - val_loss: 0.2742 - val_acc: 0.8942
Epoch 12/20
100/100 [==============================] - 8s 77ms/step - loss: 0.2332 - acc: 0.9031 - val_loss: 0.2944 - val_acc: 0.8675
Epoch 13/20
100/100 [==============================] - 9s 89ms/step - loss: 0.2225 - acc: 0.9137 - val_loss: 0.2233 - val_acc: 0.9075
Epoch 14/20
100/100 [==============================] - 8s 81ms/step - loss: 0.2170 - acc: 0.9149 - val_loss: 0.2160 - val_acc: 0.9163
Epoch 15/20
100/100 [==============================] - 8s 85ms/step - loss: 0.2317 - acc: 0.9087 - val_loss: 0.2046 - val_acc: 0.9194
Epoch 16/20
100/100 [==============================] - 8s 80ms/step - loss: 0.2144 - acc: 0.9106 - val_loss: 0.2128 - val_acc: 0.918010
Epoch 17/20
100/100 [==============================] - 8s 84ms/step - loss: 0.2070 - acc: 0.9200 - val_loss: 0.2275 - val_acc: 0.9000
Epoch 18/20
100/100 [==============================] - 8s 79ms/step - loss: 0.1954 - acc: 0.9266 - val_loss: 0.2108 - val_acc: 0.9169
Epoch 19/20
100/100 [==============================] - 9s 85ms/step - loss: 0.2056 - acc: 0.9178 - val_loss: 0.2312 - val_acc: 0.9100
Epoch 20/20
100/100 [==============================] - 8s 77ms/step - loss: 0.1992 - acc: 0.9156 - val_loss: 0.2071 - val_acc: 0.9175

test accuracy: 0.9221875
test loss: 0.20159238416701555
test Optimizer: Adam
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 64, 64, 16)        448       
_________________________________________________________________
max_pool1 (MaxPooling2D)     (None, 32, 32, 16)        0         
_________________________________________________________________
conv2 (Conv2D)               (None, 32, 32, 32)        4640      
_________________________________________________________________
max_pool2 (MaxPooling2D)     (None, 16, 16, 32)        0         
_________________________________________________________________
flatten_5 (Flatten)          (None, 8192)              0         
_________________________________________________________________
fc1 (Dense)                  (None, 16)                131088    
_________________________________________________________________
fc2 (Dense)                  (None, 1)                 17        
=================================================================
Total params: 136,193
Trainable params: 136,193
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
100/100 [==============================] - 9s 90ms/step - loss: 0.4496 - acc: 0.7875 - val_loss: 0.3102 - val_acc: 0.8719
Epoch 2/20
100/100 [==============================] - 8s 75ms/step - loss: 0.3343 - acc: 0.8547 - val_loss: 0.3011 - val_acc: 0.8681
Epoch 3/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2940 - acc: 0.8812 - val_loss: 0.2870 - val_acc: 0.8869
Epoch 4/20
100/100 [==============================] - 8s 76ms/step - loss: 0.2676 - acc: 0.8912 - val_loss: 0.3052 - val_acc: 0.8775
Epoch 5/20
100/100 [==============================] - 7s 71ms/step - loss: 0.2734 - acc: 0.8895 - val_loss: 0.2228 - val_acc: 0.9175
Epoch 6/20
100/100 [==============================] - 8s 80ms/step - loss: 0.2248 - acc: 0.9103 - val_loss: 0.2668 - val_acc: 0.8967
Epoch 7/20
100/100 [==============================] - 8s 76ms/step - loss: 0.2389 - acc: 0.9038 - val_loss: 0.2516 - val_acc: 0.9075
Epoch 8/20
100/100 [==============================] - 8s 78ms/step - loss: 0.2467 - acc: 0.9038 - val_loss: 0.2342 - val_acc: 0.9119
Epoch 9/20
100/100 [==============================] - 8s 76ms/step - loss: 0.2348 - acc: 0.9083 - val_loss: 0.2184 - val_acc: 0.9169
Epoch 10/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2297 - acc: 0.9084 - val_loss: 0.2519 - val_acc: 0.9019
Epoch 11/20
100/100 [==============================] - 8s 77ms/step - loss: 0.2057 - acc: 0.9221 - val_loss: 0.2073 - val_acc: 0.9230
Epoch 12/20
100/100 [==============================] - 7s 74ms/step - loss: 0.1964 - acc: 0.9228 - val_loss: 0.2396 - val_acc: 0.9062
Epoch 13/20
100/100 [==============================] - 8s 75ms/step - loss: 0.2039 - acc: 0.9178 - val_loss: 0.2075 - val_acc: 0.9181
Epoch 14/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2228 - acc: 0.9141 - val_loss: 0.2187 - val_acc: 0.9156
Epoch 15/20
100/100 [==============================] - 8s 77ms/step - loss: 0.2074 - acc: 0.9122 - val_loss: 0.2453 - val_acc: 0.9069
Epoch 16/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2005 - acc: 0.9222 - val_loss: 0.2040 - val_acc: 0.9111
Epoch 17/20
100/100 [==============================] - 8s 79ms/step - loss: 0.1883 - acc: 0.9261 - val_loss: 0.2084 - val_acc: 0.9200
Epoch 18/20
100/100 [==============================] - 7s 73ms/step - loss: 0.1728 - acc: 0.9306 - val_loss: 0.2534 - val_acc: 0.9062
Epoch 19/20
100/100 [==============================] - 8s 78ms/step - loss: 0.2101 - acc: 0.9159 - val_loss: 0.2242 - val_acc: 0.9100
Epoch 20/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2104 - acc: 0.9166 - val_loss: 0.2062 - val_acc: 0.9231

test accuracy: 0.9196875
test loss: 0.20859682226553558

2.3.2 Experiments with Different Optimal Dropouts

Five different rates, including 0, 0.2, 0.5, 0.8, and 1, are tried for the dropout in this part.

In [67]:
def CNN_Dropout_Selector (activation_function_1, activation_function_2):
    
    K.clear_session()
    
    Adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    
    rate = [0.0, 0.2, 0.5, 0.8, 1.0]
    

    model = [0] * 5
    
    for j in range(0, 5):
        
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        model[j].add(layers.Flatten())

        model[j].add(layers.Dense(16, kernel_initializer='glorot_uniform', activation=activation_function_1, name='fc1'))
        model[j].add(layers.Dropout(rate[j], noise_shape=None, seed=None))
        
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, name='fc2'))
        model[j].add(layers.Dropout(rate[j], noise_shape=None, seed=None))
        
        model[j].compile(loss='binary_crossentropy', optimizer = Adadelta, metrics=['accuracy'])
    
        print('\n Dropout Rate:', rate[j])
        
        # Fit the model
        history = model[j].fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=20,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [68]:
CNN_Dropout_Selector('relu', 'sigmoid')
 Dropout Rate: 0.0
Epoch 1/20
100/100 [==============================] - 5s 48ms/step - loss: 0.5429 - acc: 0.7294 - val_loss: 0.3949 - val_acc: 0.8294
Epoch 2/20
100/100 [==============================] - 5s 47ms/step - loss: 0.4066 - acc: 0.8153 - val_loss: 0.3262 - val_acc: 0.8650
Epoch 3/20
100/100 [==============================] - 5s 46ms/step - loss: 0.3534 - acc: 0.8447 - val_loss: 0.3070 - val_acc: 0.8706
Epoch 4/20
100/100 [==============================] - 4s 39ms/step - loss: 0.3123 - acc: 0.8675 - val_loss: 0.2631 - val_acc: 0.8912
Epoch 5/20
100/100 [==============================] - 4s 40ms/step - loss: 0.2917 - acc: 0.8747 - val_loss: 0.3060 - val_acc: 0.8544
Epoch 6/20
100/100 [==============================] - 4s 43ms/step - loss: 0.2621 - acc: 0.8993 - val_loss: 0.2305 - val_acc: 0.9168
Epoch 7/20
100/100 [==============================] - 4s 43ms/step - loss: 0.2503 - acc: 0.8956 - val_loss: 0.2403 - val_acc: 0.9025
Epoch 8/20
100/100 [==============================] - 4s 43ms/step - loss: 0.2489 - acc: 0.8931 - val_loss: 0.3606 - val_acc: 0.8450
Epoch 9/20
100/100 [==============================] - 5s 46ms/step - loss: 0.2534 - acc: 0.8941 - val_loss: 0.2043 - val_acc: 0.9194
Epoch 10/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2400 - acc: 0.9059 - val_loss: 0.2480 - val_acc: 0.9012
Epoch 11/20
100/100 [==============================] - 5s 54ms/step - loss: 0.2352 - acc: 0.8987 - val_loss: 0.2239 - val_acc: 0.9149
Epoch 12/20
100/100 [==============================] - 5s 46ms/step - loss: 0.2225 - acc: 0.9081 - val_loss: 0.2491 - val_acc: 0.8956
Epoch 13/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2120 - acc: 0.9184 - val_loss: 0.2858 - val_acc: 0.8844
Epoch 14/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2194 - acc: 0.9078 - val_loss: 0.2638 - val_acc: 0.8694
Epoch 15/20
100/100 [==============================] - 5s 47ms/step - loss: 0.2113 - acc: 0.9159 - val_loss: 0.2033 - val_acc: 0.9131
Epoch 16/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2107 - acc: 0.9134 - val_loss: 0.2208 - val_acc: 0.9093
Epoch 17/20
100/100 [==============================] - 5s 47ms/step - loss: 0.2035 - acc: 0.9172 - val_loss: 0.1824 - val_acc: 0.9294
Epoch 18/20
100/100 [==============================] - 4s 45ms/step - loss: 0.1939 - acc: 0.9253 - val_loss: 0.2033 - val_acc: 0.9187
Epoch 19/20
100/100 [==============================] - 5s 45ms/step - loss: 0.2023 - acc: 0.9118 - val_loss: 0.2054 - val_acc: 0.9200
Epoch 20/20
100/100 [==============================] - 5s 45ms/step - loss: 0.2040 - acc: 0.9166 - val_loss: 0.2044 - val_acc: 0.9137

test accuracy: 0.916875
test loss: 0.21556401148438453
 Dropout Rate: 0.2
Epoch 1/20
100/100 [==============================] - 6s 56ms/step - loss: 2.1532 - acc: 0.6819 - val_loss: 0.5289 - val_acc: 0.6794
Epoch 2/20
100/100 [==============================] - 6s 56ms/step - loss: 2.0695 - acc: 0.7503 - val_loss: 0.3470 - val_acc: 0.8700
Epoch 3/20
100/100 [==============================] - 5s 52ms/step - loss: 2.0292 - acc: 0.7712 - val_loss: 0.3517 - val_acc: 0.8681
Epoch 4/20
100/100 [==============================] - 5s 49ms/step - loss: 1.9228 - acc: 0.7738 - val_loss: 0.4077 - val_acc: 0.8300
Epoch 5/20
100/100 [==============================] - 5s 53ms/step - loss: 2.0115 - acc: 0.7778 - val_loss: 0.3391 - val_acc: 0.8756
Epoch 6/20
100/100 [==============================] - 6s 56ms/step - loss: 1.8902 - acc: 0.8060 - val_loss: 0.2908 - val_acc: 0.8836
Epoch 7/20
100/100 [==============================] - 5s 48ms/step - loss: 1.8363 - acc: 0.7960 - val_loss: 0.3217 - val_acc: 0.8862
Epoch 8/20
100/100 [==============================] - 5s 51ms/step - loss: 2.0011 - acc: 0.7956 - val_loss: 0.3574 - val_acc: 0.8462
Epoch 9/20
100/100 [==============================] - 5s 45ms/step - loss: 2.0696 - acc: 0.8041 - val_loss: 0.3351 - val_acc: 0.8556
Epoch 10/20
100/100 [==============================] - 5s 45ms/step - loss: 2.0030 - acc: 0.8084 - val_loss: 0.2291 - val_acc: 0.9175
Epoch 11/20
100/100 [==============================] - 5s 53ms/step - loss: 1.9662 - acc: 0.8044 - val_loss: 0.3065 - val_acc: 0.8730
Epoch 12/20
100/100 [==============================] - 5s 51ms/step - loss: 2.0028 - acc: 0.8069 - val_loss: 0.2875 - val_acc: 0.9006
Epoch 13/20
100/100 [==============================] - 4s 43ms/step - loss: 1.8512 - acc: 0.8107 - val_loss: 0.3105 - val_acc: 0.8794
Epoch 14/20
100/100 [==============================] - 5s 52ms/step - loss: 1.9163 - acc: 0.8147 - val_loss: 0.2941 - val_acc: 0.9000
Epoch 15/20
100/100 [==============================] - 5s 47ms/step - loss: 1.9330 - acc: 0.8259 - val_loss: 0.2478 - val_acc: 0.9150
Epoch 16/20
100/100 [==============================] - 5s 46ms/step - loss: 2.1788 - acc: 0.8025 - val_loss: 0.2566 - val_acc: 0.9136
Epoch 17/20
100/100 [==============================] - 5s 45ms/step - loss: 2.1245 - acc: 0.8006 - val_loss: 0.2438 - val_acc: 0.9094
Epoch 18/20
100/100 [==============================] - 5s 50ms/step - loss: 2.0383 - acc: 0.8156 - val_loss: 0.2414 - val_acc: 0.9150
Epoch 19/20
100/100 [==============================] - 6s 55ms/step - loss: 1.9586 - acc: 0.8147 - val_loss: 0.2216 - val_acc: 0.9144
Epoch 20/20
100/100 [==============================] - 5s 45ms/step - loss: 1.8740 - acc: 0.8266 - val_loss: 0.2406 - val_acc: 0.9012

test accuracy: 0.909375
test loss: 0.24699302062392234
 Dropout Rate: 0.5
Epoch 1/20
100/100 [==============================] - 5s 54ms/step - loss: 4.6138 - acc: 0.5778 - val_loss: 0.7736 - val_acc: 0.4813
Epoch 2/20
100/100 [==============================] - 4s 44ms/step - loss: 4.7414 - acc: 0.5981 - val_loss: 0.6791 - val_acc: 0.4925
Epoch 3/20
100/100 [==============================] - 4s 44ms/step - loss: 4.5734 - acc: 0.6266 - val_loss: 0.5831 - val_acc: 0.5931
Epoch 4/20
100/100 [==============================] - 5s 45ms/step - loss: 4.5598 - acc: 0.6309 - val_loss: 0.5661 - val_acc: 0.6000
Epoch 5/20
100/100 [==============================] - 5s 49ms/step - loss: 4.4344 - acc: 0.6478 - val_loss: 0.5486 - val_acc: 0.5631
Epoch 6/20
100/100 [==============================] - 6s 55ms/step - loss: 4.5777 - acc: 0.6584 - val_loss: 0.7094 - val_acc: 0.5375
Epoch 7/20
100/100 [==============================] - 5s 46ms/step - loss: 4.4753 - acc: 0.6575 - val_loss: 0.6715 - val_acc: 0.5281
Epoch 8/20
100/100 [==============================] - 5s 47ms/step - loss: 4.5403 - acc: 0.6575 - val_loss: 0.6254 - val_acc: 0.5238
Epoch 9/20
100/100 [==============================] - 5s 47ms/step - loss: 4.4977 - acc: 0.6579 - val_loss: 0.5551 - val_acc: 0.7006
Epoch 10/20
100/100 [==============================] - 5s 53ms/step - loss: 4.5519 - acc: 0.6628 - val_loss: 0.5043 - val_acc: 0.6994
Epoch 11/20
100/100 [==============================] - 6s 59ms/step - loss: 4.5828 - acc: 0.6597 - val_loss: 0.4618 - val_acc: 0.7922
Epoch 12/20
100/100 [==============================] - 5s 55ms/step - loss: 4.5313 - acc: 0.6528 - val_loss: 0.4803 - val_acc: 0.7675
Epoch 13/20
100/100 [==============================] - 5s 51ms/step - loss: 4.5452 - acc: 0.6631 - val_loss: 0.6136 - val_acc: 0.5725
Epoch 14/20
100/100 [==============================] - 5s 49ms/step - loss: 4.6961 - acc: 0.6478 - val_loss: 0.3863 - val_acc: 0.8744
Epoch 15/20
100/100 [==============================] - 5s 53ms/step - loss: 4.6228 - acc: 0.6429 - val_loss: 0.4559 - val_acc: 0.7875
Epoch 16/20
100/100 [==============================] - 5s 50ms/step - loss: 4.2420 - acc: 0.6806 - val_loss: 0.4967 - val_acc: 0.6502
Epoch 17/20
100/100 [==============================] - 5s 49ms/step - loss: 4.5348 - acc: 0.6539 - val_loss: 0.5115 - val_acc: 0.6694
Epoch 18/20
100/100 [==============================] - 6s 56ms/step - loss: 4.3478 - acc: 0.6609 - val_loss: 0.4865 - val_acc: 0.7238
Epoch 19/20
100/100 [==============================] - 4s 45ms/step - loss: 4.5363 - acc: 0.6519 - val_loss: 0.4431 - val_acc: 0.7625
Epoch 20/20
100/100 [==============================] - 5s 45ms/step - loss: 4.4014 - acc: 0.6519 - val_loss: 0.4806 - val_acc: 0.7319

test accuracy: 0.71625
test loss: 0.5172570446133613
 Dropout Rate: 0.8
Epoch 1/20
100/100 [==============================] - 5s 54ms/step - loss: 8.1344 - acc: 0.3879 - val_loss: 0.6956 - val_acc: 0.4938
Epoch 2/20
100/100 [==============================] - 5s 51ms/step - loss: 8.3897 - acc: 0.3694 - val_loss: 0.6957 - val_acc: 0.4775
Epoch 3/20
100/100 [==============================] - 5s 49ms/step - loss: 8.3453 - acc: 0.3669 - val_loss: 0.6969 - val_acc: 0.4788
Epoch 4/20
100/100 [==============================] - 5s 49ms/step - loss: 8.4397 - acc: 0.3762 - val_loss: 0.6967 - val_acc: 0.4781
Epoch 5/20
100/100 [==============================] - 5s 50ms/step - loss: 8.5066 - acc: 0.3703 - val_loss: 0.6971 - val_acc: 0.4644
Epoch 6/20
100/100 [==============================] - 5s 53ms/step - loss: 8.1553 - acc: 0.3816 - val_loss: 0.6975 - val_acc: 0.4681
Epoch 7/20
100/100 [==============================] - 5s 46ms/step - loss: 8.2098 - acc: 0.3866 - val_loss: 0.6962 - val_acc: 0.4869
Epoch 8/20
100/100 [==============================] - 5s 46ms/step - loss: 8.4509 - acc: 0.3719 - val_loss: 0.6983 - val_acc: 0.4556
Epoch 9/20
100/100 [==============================] - 5s 48ms/step - loss: 8.4205 - acc: 0.3759 - val_loss: 0.6965 - val_acc: 0.4662
Epoch 10/20
100/100 [==============================] - 5s 47ms/step - loss: 8.5406 - acc: 0.3615 - val_loss: 0.6951 - val_acc: 0.4938
Epoch 11/20
100/100 [==============================] - 5s 47ms/step - loss: 8.3988 - acc: 0.3764 - val_loss: 0.6959 - val_acc: 0.4825
Epoch 12/20
100/100 [==============================] - 5s 49ms/step - loss: 8.0728 - acc: 0.3791 - val_loss: 0.6964 - val_acc: 0.4775
Epoch 13/20
100/100 [==============================] - 5s 50ms/step - loss: 8.1850 - acc: 0.3781 - val_loss: 0.6965 - val_acc: 0.4769
Epoch 14/20
100/100 [==============================] - 5s 54ms/step - loss: 8.3301 - acc: 0.3738 - val_loss: 0.6965 - val_acc: 0.4800
Epoch 15/20
100/100 [==============================] - 5s 48ms/step - loss: 8.7525 - acc: 0.3653 - val_loss: 0.6965 - val_acc: 0.4744
Epoch 16/20
100/100 [==============================] - 6s 56ms/step - loss: 8.3749 - acc: 0.3741 - val_loss: 0.6961 - val_acc: 0.4819
Epoch 17/20
100/100 [==============================] - 5s 48ms/step - loss: 8.4190 - acc: 0.3706 - val_loss: 0.6976 - val_acc: 0.4587
Epoch 18/20
100/100 [==============================] - 5s 51ms/step - loss: 8.1637 - acc: 0.3809 - val_loss: 0.6959 - val_acc: 0.4794
Epoch 19/20
100/100 [==============================] - 5s 47ms/step - loss: 8.4333 - acc: 0.3741 - val_loss: 0.6947 - val_acc: 0.4994
Epoch 20/20
100/100 [==============================] - 5s 48ms/step - loss: 8.5003 - acc: 0.3600 - val_loss: 0.6980 - val_acc: 0.4594

test accuracy: 0.489375
test loss: 0.6952189660072327
 Dropout Rate: 1.0
Epoch 1/20
100/100 [==============================] - 6s 57ms/step - loss: 0.5349 - acc: 0.7342 - val_loss: 0.3603 - val_acc: 0.8419
Epoch 2/20
100/100 [==============================] - 5s 52ms/step - loss: 0.3749 - acc: 0.8356 - val_loss: 0.4190 - val_acc: 0.8113
Epoch 3/20
100/100 [==============================] - 5s 54ms/step - loss: 0.3408 - acc: 0.8572 - val_loss: 0.3190 - val_acc: 0.8738
Epoch 4/20
100/100 [==============================] - 6s 58ms/step - loss: 0.2930 - acc: 0.8797 - val_loss: 0.3308 - val_acc: 0.8556
Epoch 5/20
100/100 [==============================] - 6s 56ms/step - loss: 0.2770 - acc: 0.8853 - val_loss: 0.3092 - val_acc: 0.8800
Epoch 6/20
100/100 [==============================] - 6s 62ms/step - loss: 0.2713 - acc: 0.8878 - val_loss: 0.2519 - val_acc: 0.9099
Epoch 7/20
100/100 [==============================] - 6s 57ms/step - loss: 0.2695 - acc: 0.8944 - val_loss: 0.2724 - val_acc: 0.8925
Epoch 8/20
100/100 [==============================] - 6s 58ms/step - loss: 0.2570 - acc: 0.8994 - val_loss: 0.2782 - val_acc: 0.8894
Epoch 9/20
100/100 [==============================] - 6s 61ms/step - loss: 0.2437 - acc: 0.9006 - val_loss: 0.2278 - val_acc: 0.9156
Epoch 10/20
100/100 [==============================] - 6s 60ms/step - loss: 0.2416 - acc: 0.8997 - val_loss: 0.2016 - val_acc: 0.9213
Epoch 11/20
100/100 [==============================] - 6s 55ms/step - loss: 0.2380 - acc: 0.9069 - val_loss: 0.2361 - val_acc: 0.9055
Epoch 12/20
100/100 [==============================] - 5s 53ms/step - loss: 0.2177 - acc: 0.9134 - val_loss: 0.2024 - val_acc: 0.9156
Epoch 13/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2244 - acc: 0.9087 - val_loss: 0.2474 - val_acc: 0.8994
Epoch 14/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2243 - acc: 0.9022 - val_loss: 0.2136 - val_acc: 0.9163
Epoch 15/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2233 - acc: 0.9106 - val_loss: 0.2230 - val_acc: 0.9106
Epoch 16/20
100/100 [==============================] - 5s 52ms/step - loss: 0.2059 - acc: 0.9156 - val_loss: 0.2174 - val_acc: 0.9111
Epoch 17/20
100/100 [==============================] - 5s 54ms/step - loss: 0.2024 - acc: 0.9163 - val_loss: 0.2118 - val_acc: 0.9137
Epoch 18/20
100/100 [==============================] - 5s 54ms/step - loss: 0.2085 - acc: 0.9137 - val_loss: 0.2616 - val_acc: 0.8956
Epoch 19/20
100/100 [==============================] - 5s 51ms/step - loss: 0.1867 - acc: 0.9297 - val_loss: 0.2340 - val_acc: 0.9075
Epoch 20/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2023 - acc: 0.9184 - val_loss: 0.2418 - val_acc: 0.9050

test accuracy: 0.9090625
test loss: 0.24463993303477763

2.3.3 Experiments with Different Regularizers\

Five different penalities are put on the higher parameter values: 0.01, 0.02, 0.05. 0.08, 0.09.

In [16]:
def CNN_Regularizer_Selector (activation_function_1, activation_function_2):
    
    K.clear_session()
    
    Adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    
    reg = [0, 0.00001, 0.0001, 0.001, 0.1]

    model = [0] * 5
    
    for j in range(0, 5):
        
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        model[j].add(layers.Flatten())

        model[j].add(layers.Dense(16, kernel_initializer='glorot_uniform', activation=activation_function_1, 
                                  kernel_regularizer=keras.regularizers.l2(reg[j]), activity_regularizer=keras.regularizers.l1(reg[j]), name='fc1'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, 
                                  kernel_regularizer=keras.regularizers.l2(reg[j]), activity_regularizer=keras.regularizers.l1(reg[j]), name='fc2'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].compile(loss='binary_crossentropy', optimizer = Adadelta, metrics=['accuracy'])
    
        print('\n Regulation Penalty:', reg[j])
        
        # Fit the model
        history = model[j].fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=20,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [17]:
CNN_Regularizer_Selector('relu', 'sigmoid')
 Regulation Penalty: 0
Epoch 1/20
100/100 [==============================] - 5s 52ms/step - loss: 0.5576 - acc: 0.7209 - val_loss: 0.4315 - val_acc: 0.8013
Epoch 2/20
100/100 [==============================] - 5s 46ms/step - loss: 0.3913 - acc: 0.8216 - val_loss: 0.3320 - val_acc: 0.8650
Epoch 3/20
100/100 [==============================] - 4s 44ms/step - loss: 0.3420 - acc: 0.8572 - val_loss: 0.2866 - val_acc: 0.8844
Epoch 4/20
100/100 [==============================] - 4s 44ms/step - loss: 0.3343 - acc: 0.8591 - val_loss: 0.3045 - val_acc: 0.8800
Epoch 5/20
100/100 [==============================] - 4s 43ms/step - loss: 0.3215 - acc: 0.8609 - val_loss: 0.2895 - val_acc: 0.8825
Epoch 6/20
100/100 [==============================] - 5s 48ms/step - loss: 0.2948 - acc: 0.8722 - val_loss: 0.2690 - val_acc: 0.8874
Epoch 7/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2561 - acc: 0.8931 - val_loss: 0.2810 - val_acc: 0.9006
Epoch 8/20
100/100 [==============================] - 5s 54ms/step - loss: 0.2532 - acc: 0.8928 - val_loss: 0.2261 - val_acc: 0.9106
Epoch 9/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2523 - acc: 0.9037 - val_loss: 0.2687 - val_acc: 0.8900
Epoch 10/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2340 - acc: 0.9113 - val_loss: 0.2235 - val_acc: 0.9113
Epoch 11/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2248 - acc: 0.9087 - val_loss: 0.2325 - val_acc: 0.9136
Epoch 12/20
100/100 [==============================] - 5s 48ms/step - loss: 0.2378 - acc: 0.9047 - val_loss: 0.3211 - val_acc: 0.8712
Epoch 13/20
100/100 [==============================] - 5s 46ms/step - loss: 0.2378 - acc: 0.9047 - val_loss: 0.2181 - val_acc: 0.9125
Epoch 14/20
100/100 [==============================] - 5s 46ms/step - loss: 0.2318 - acc: 0.9038 - val_loss: 0.2685 - val_acc: 0.8881
Epoch 15/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2036 - acc: 0.9136 - val_loss: 0.2213 - val_acc: 0.9131
Epoch 16/20
100/100 [==============================] - 5s 48ms/step - loss: 0.1981 - acc: 0.9156 - val_loss: 0.2101 - val_acc: 0.9111
Epoch 17/20
100/100 [==============================] - 5s 52ms/step - loss: 0.1937 - acc: 0.9241 - val_loss: 0.3108 - val_acc: 0.8688
Epoch 18/20
100/100 [==============================] - 5s 54ms/step - loss: 0.2035 - acc: 0.9150 - val_loss: 0.2219 - val_acc: 0.9119
Epoch 19/20
100/100 [==============================] - 5s 49ms/step - loss: 0.1893 - acc: 0.9250 - val_loss: 0.2516 - val_acc: 0.8981
Epoch 20/20
100/100 [==============================] - 6s 57ms/step - loss: 0.2108 - acc: 0.9189 - val_loss: 0.2440 - val_acc: 0.9006

test accuracy: 0.890625
test loss: 0.2709206909686327
 Regulation Penalty: 1e-05
Epoch 1/20
100/100 [==============================] - 6s 58ms/step - loss: 0.5605 - acc: 0.6984 - val_loss: 0.3913 - val_acc: 0.8187
Epoch 2/20
100/100 [==============================] - 5s 48ms/step - loss: 0.3820 - acc: 0.8369 - val_loss: 0.3832 - val_acc: 0.8337
Epoch 3/20
100/100 [==============================] - 5s 47ms/step - loss: 0.3246 - acc: 0.8584 - val_loss: 0.3810 - val_acc: 0.8331
Epoch 4/20
100/100 [==============================] - 5s 49ms/step - loss: 0.3420 - acc: 0.8500 - val_loss: 0.2874 - val_acc: 0.8894
Epoch 5/20
100/100 [==============================] - 7s 71ms/step - loss: 0.2931 - acc: 0.8805 - val_loss: 0.2702 - val_acc: 0.8900
Epoch 6/20
100/100 [==============================] - 5s 53ms/step - loss: 0.2732 - acc: 0.8934 - val_loss: 0.2838 - val_acc: 0.8930
Epoch 7/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2613 - acc: 0.8966 - val_loss: 0.2647 - val_acc: 0.8956
Epoch 8/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2809 - acc: 0.8903 - val_loss: 0.2544 - val_acc: 0.9050
Epoch 9/20
100/100 [==============================] - 5s 52ms/step - loss: 0.2580 - acc: 0.8978 - val_loss: 0.2298 - val_acc: 0.9106
Epoch 10/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2590 - acc: 0.8947 - val_loss: 0.2073 - val_acc: 0.9156
Epoch 11/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2532 - acc: 0.9051 - val_loss: 0.2223 - val_acc: 0.9124
Epoch 12/20
100/100 [==============================] - 4s 45ms/step - loss: 0.2420 - acc: 0.9087 - val_loss: 0.3005 - val_acc: 0.8831
Epoch 13/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2373 - acc: 0.9025 - val_loss: 0.2364 - val_acc: 0.9012
Epoch 14/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2286 - acc: 0.9137 - val_loss: 0.2128 - val_acc: 0.9225
Epoch 15/20
100/100 [==============================] - 4s 43ms/step - loss: 0.2217 - acc: 0.9134 - val_loss: 0.2687 - val_acc: 0.8975
Epoch 16/20
100/100 [==============================] - 5s 47ms/step - loss: 0.2289 - acc: 0.9097 - val_loss: 0.2446 - val_acc: 0.8967
Epoch 17/20
100/100 [==============================] - 5s 45ms/step - loss: 0.2016 - acc: 0.9211 - val_loss: 0.2208 - val_acc: 0.9163
Epoch 18/20
100/100 [==============================] - 5s 47ms/step - loss: 0.2123 - acc: 0.9144 - val_loss: 0.2379 - val_acc: 0.8981
Epoch 19/20
100/100 [==============================] - 5s 48ms/step - loss: 0.2090 - acc: 0.9163 - val_loss: 0.2296 - val_acc: 0.9119
Epoch 20/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2032 - acc: 0.9194 - val_loss: 0.2111 - val_acc: 0.9206

test accuracy: 0.91
test loss: 0.25261500317603347
 Regulation Penalty: 0.0001
Epoch 1/20
100/100 [==============================] - 5s 53ms/step - loss: 0.5775 - acc: 0.7113 - val_loss: 0.4115 - val_acc: 0.8231
Epoch 2/20
100/100 [==============================] - 5s 52ms/step - loss: 0.4143 - acc: 0.8205 - val_loss: 0.3731 - val_acc: 0.8512
Epoch 3/20
100/100 [==============================] - 5s 45ms/step - loss: 0.3799 - acc: 0.8459 - val_loss: 0.3288 - val_acc: 0.8738
Epoch 4/20
100/100 [==============================] - 5s 50ms/step - loss: 0.3306 - acc: 0.8731 - val_loss: 0.2947 - val_acc: 0.9050
Epoch 5/20
100/100 [==============================] - 6s 57ms/step - loss: 0.3244 - acc: 0.8675 - val_loss: 0.2766 - val_acc: 0.9019
Epoch 6/20
100/100 [==============================] - 6s 55ms/step - loss: 0.2974 - acc: 0.8825 - val_loss: 0.3319 - val_acc: 0.8680
Epoch 7/20
100/100 [==============================] - 5s 52ms/step - loss: 0.2883 - acc: 0.8816 - val_loss: 0.2738 - val_acc: 0.9106
Epoch 8/20
100/100 [==============================] - 6s 60ms/step - loss: 0.2725 - acc: 0.8962 - val_loss: 0.3104 - val_acc: 0.8900
Epoch 9/20
100/100 [==============================] - 5s 55ms/step - loss: 0.2742 - acc: 0.9003 - val_loss: 0.2804 - val_acc: 0.8969
Epoch 10/20
100/100 [==============================] - 6s 56ms/step - loss: 0.2858 - acc: 0.8909 - val_loss: 0.2472 - val_acc: 0.9081
Epoch 11/20
100/100 [==============================] - 6s 59ms/step - loss: 0.2561 - acc: 0.9044 - val_loss: 0.2412 - val_acc: 0.9212
Epoch 12/20
100/100 [==============================] - 5s 49ms/step - loss: 0.2469 - acc: 0.9100 - val_loss: 0.2691 - val_acc: 0.9062
Epoch 13/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2466 - acc: 0.9094 - val_loss: 0.2392 - val_acc: 0.9119
Epoch 14/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2560 - acc: 0.9025 - val_loss: 0.3198 - val_acc: 0.8631
Epoch 15/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2615 - acc: 0.8995 - val_loss: 0.2371 - val_acc: 0.9213
Epoch 16/20
100/100 [==============================] - 5s 51ms/step - loss: 0.2436 - acc: 0.9147 - val_loss: 0.2445 - val_acc: 0.9168
Epoch 17/20
100/100 [==============================] - 5s 54ms/step - loss: 0.2295 - acc: 0.9116 - val_loss: 0.2444 - val_acc: 0.9125
Epoch 18/20
100/100 [==============================] - 5s 50ms/step - loss: 0.2368 - acc: 0.9150 - val_loss: 0.2659 - val_acc: 0.8994
Epoch 19/20
100/100 [==============================] - 5s 53ms/step - loss: 0.2267 - acc: 0.9170 - val_loss: 0.2478 - val_acc: 0.9087
Epoch 20/20
100/100 [==============================] - 5s 53ms/step - loss: 0.2347 - acc: 0.9181 - val_loss: 0.2679 - val_acc: 0.9075

test accuracy: 0.8975
test loss: 0.28307476796209813
 Regulation Penalty: 0.001
Epoch 1/20
100/100 [==============================] - 6s 56ms/step - loss: 0.6983 - acc: 0.6366 - val_loss: 0.6730 - val_acc: 0.7169
Epoch 2/20
100/100 [==============================] - 5s 50ms/step - loss: 0.6053 - acc: 0.8150 - val_loss: 0.5862 - val_acc: 0.8337
Epoch 3/20
100/100 [==============================] - 5s 50ms/step - loss: 0.5532 - acc: 0.8447 - val_loss: 0.5310 - val_acc: 0.8700
Epoch 4/20
100/100 [==============================] - 5s 49ms/step - loss: 0.5260 - acc: 0.8572 - val_loss: 0.4915 - val_acc: 0.8738
Epoch 5/20
100/100 [==============================] - 5s 50ms/step - loss: 0.4815 - acc: 0.8709 - val_loss: 0.4546 - val_acc: 0.9000
Epoch 6/20
100/100 [==============================] - 5s 53ms/step - loss: 0.4541 - acc: 0.8907 - val_loss: 0.4363 - val_acc: 0.8942
Epoch 7/20
100/100 [==============================] - 5s 50ms/step - loss: 0.4428 - acc: 0.8800 - val_loss: 0.4365 - val_acc: 0.8969
Epoch 8/20
100/100 [==============================] - 5s 49ms/step - loss: 0.4334 - acc: 0.8866 - val_loss: 0.4002 - val_acc: 0.9069
Epoch 9/20
100/100 [==============================] - 5s 52ms/step - loss: 0.4014 - acc: 0.9003 - val_loss: 0.4079 - val_acc: 0.8944
Epoch 10/20
100/100 [==============================] - 5s 50ms/step - loss: 0.3957 - acc: 0.8991 - val_loss: 0.3951 - val_acc: 0.9000
Epoch 11/20
100/100 [==============================] - 5s 52ms/step - loss: 0.3875 - acc: 0.9006 - val_loss: 0.3928 - val_acc: 0.8999
Epoch 12/20
100/100 [==============================] - 5s 51ms/step - loss: 0.3779 - acc: 0.9000 - val_loss: 0.3673 - val_acc: 0.9200
Epoch 13/20
100/100 [==============================] - 5s 51ms/step - loss: 0.3692 - acc: 0.9091 - val_loss: 0.4109 - val_acc: 0.8819
Epoch 14/20
100/100 [==============================] - 5s 53ms/step - loss: 0.3684 - acc: 0.9047 - val_loss: 0.3726 - val_acc: 0.9106
Epoch 15/20
100/100 [==============================] - 5s 55ms/step - loss: 0.3732 - acc: 0.8984 - val_loss: 0.3519 - val_acc: 0.9137
Epoch 16/20
100/100 [==============================] - 5s 52ms/step - loss: 0.3454 - acc: 0.9201 - val_loss: 0.3232 - val_acc: 0.9262
Epoch 17/20
100/100 [==============================] - 5s 51ms/step - loss: 0.3506 - acc: 0.9091 - val_loss: 0.3590 - val_acc: 0.9025
Epoch 18/20
100/100 [==============================] - 5s 54ms/step - loss: 0.3435 - acc: 0.9116 - val_loss: 0.3466 - val_acc: 0.9213
Epoch 19/20
100/100 [==============================] - 5s 50ms/step - loss: 0.3360 - acc: 0.9131 - val_loss: 0.3312 - val_acc: 0.9175
Epoch 20/20
100/100 [==============================] - 5s 51ms/step - loss: 0.3501 - acc: 0.8969 - val_loss: 0.3442 - val_acc: 0.9187

test accuracy: 0.9065625
test loss: 0.3628449746966362
 Regulation Penalty: 0.1
Epoch 1/20
100/100 [==============================] - 6s 60ms/step - loss: 3.2556 - acc: 0.4591 - val_loss: 2.4458 - val_acc: 0.4700
Epoch 2/20
100/100 [==============================] - 5s 50ms/step - loss: 2.2022 - acc: 0.4800 - val_loss: 2.0691 - val_acc: 0.4925
Epoch 3/20
100/100 [==============================] - 5s 52ms/step - loss: 2.0103 - acc: 0.4719 - val_loss: 1.9462 - val_acc: 0.4763
Epoch 4/20
100/100 [==============================] - 5s 51ms/step - loss: 1.8935 - acc: 0.4772 - val_loss: 1.8425 - val_acc: 0.4800
Epoch 5/20
100/100 [==============================] - 5s 50ms/step - loss: 1.8246 - acc: 0.4496 - val_loss: 1.7564 - val_acc: 0.4919
Epoch 6/20
100/100 [==============================] - 5s 54ms/step - loss: 1.7469 - acc: 0.4659 - val_loss: 1.7094 - val_acc: 0.4737
Epoch 7/20
100/100 [==============================] - 5s 55ms/step - loss: 1.6966 - acc: 0.4637 - val_loss: 1.6650 - val_acc: 0.4725
Epoch 8/20
100/100 [==============================] - 5s 51ms/step - loss: 1.6456 - acc: 0.4721 - val_loss: 1.6146 - val_acc: 0.4844
Epoch 9/20
100/100 [==============================] - 5s 51ms/step - loss: 1.6323 - acc: 0.4600 - val_loss: 1.5967 - val_acc: 0.4781
Epoch 10/20
100/100 [==============================] - 5s 50ms/step - loss: 1.5922 - acc: 0.4741 - val_loss: 1.5639 - val_acc: 0.4875
Epoch 11/20
100/100 [==============================] - 5s 54ms/step - loss: 1.6018 - acc: 0.4569 - val_loss: 1.5956 - val_acc: 0.4581
Epoch 12/20
100/100 [==============================] - 5s 53ms/step - loss: 1.5609 - acc: 0.4784 - val_loss: 1.5275 - val_acc: 0.4969
Epoch 13/20
100/100 [==============================] - 5s 51ms/step - loss: 1.5879 - acc: 0.4578 - val_loss: 1.5582 - val_acc: 0.4750
Epoch 14/20
100/100 [==============================] - 5s 51ms/step - loss: 1.5681 - acc: 0.4681 - val_loss: 1.5521 - val_acc: 0.4769
Epoch 15/20
100/100 [==============================] - 5s 51ms/step - loss: 1.5435 - acc: 0.4813 - val_loss: 1.4958 - val_acc: 0.5081
Epoch 16/20
100/100 [==============================] - 5s 53ms/step - loss: 1.5922 - acc: 0.4525 - val_loss: 1.5524 - val_acc: 0.4750
Epoch 17/20
100/100 [==============================] - 5s 50ms/step - loss: 1.5592 - acc: 0.4713 - val_loss: 1.5623 - val_acc: 0.4694
Epoch 18/20
100/100 [==============================] - 5s 50ms/step - loss: 1.5750 - acc: 0.4622 - val_loss: 1.5268 - val_acc: 0.4894
Epoch 19/20
100/100 [==============================] - 5s 52ms/step - loss: 1.5662 - acc: 0.4667 - val_loss: 1.5610 - val_acc: 0.4700
Epoch 20/20
100/100 [==============================] - 5s 54ms/step - loss: 1.5644 - acc: 0.4681 - val_loss: 1.5118 - val_acc: 0.4975

test accuracy: 0.47375
test loss: 1.5542608499526978

2.4 The interpretation of results for experimenting

Based on the accuracy and loss of training, validation, and test sets, the best optimizer for our image dataset is Adadelta, the best rate for dropout is 0, and the best penality for our image dataset is 0.00001.

3. Parameter tuning

3.1 The hypothesis/strategy Ststement of Parameter Tuning

Here, we will explore the optimal values for several parameters for our CNN: Batch size, Learning Rate, Adadelta Decay Factor, and Initial Learning Rate Decay. We will try different bacth sizes: 16, 32, 64, 128, and 256, different learning_rate for the Adadelta optimizer: 0.1, 0.5, 1, 5, and 10, and different decay factor for the Adadelta optimizer: 0, 0.1, 0.5, 0.9, and 1.

3.2 The Types of Tests for Parameter Tuning

Here, the accuracy and loss of training, validation, and test sets obtained from different architectures will be used to choose the optimal architecture. The final values of accuracy and loss and how the accuracy and loss change with epochs will be compared too determine the optimal values.

3.3 The Code and Results for Parameter Tuning

3.3.1 Parameter Tuning -- Determine the Optimal Batch size

The optimal CNN model for the iamge dataset obtained in Part 1 and Part 2 will be used in this Part 3. We will try different bacth sizes: 16, 32, 64, 128, and 256.

In [9]:
def CNN_Parameter_Tuning_1 (activation_function_1, activation_function_2):
        
    K.clear_session()
    
    Adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)

    model = [0] * 5
    batches = [0] * 5
    
    for j in range(0, 5):
        
        batches[j] = 2**(j+4)
        
        train_generator_new = train_datagen.flow_from_directory(
            train_dir,
            target_size=(64, 64),
            batch_size = batches[j],
            class_mode='binary')
        
        for train_data_batch, train_labels_batch in train_generator_new:
            print('data batch shape:', train_data_batch.shape)
            print('labels batch shape:', train_labels_batch.shape)
            break
        
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        model[j].add(layers.Flatten())

        model[j].add(layers.Dense(16, kernel_initializer='glorot_uniform', activation=activation_function_1, 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc1'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc2'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].compile(loss='binary_crossentropy', optimizer = Adadelta, metrics=['accuracy'])
        
        # Fit the model
        
        history = model[j].fit_generator(
            train_generator_new,
            steps_per_epoch=100,
            epochs=20,
            validation_data=validation_generator,
            validation_steps=50,
            verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('\batch_size:', batches[j])
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [10]:
CNN_Parameter_Tuning_1('relu', 'sigmoid')
Found 16315 images belonging to 2 classes.
data batch shape: (16, 64, 64, 3)
labels batch shape: (16,)
Epoch 1/20
100/100 [==============================] - 11s 108ms/step - loss: 0.5810 - acc: 0.7087 - val_loss: 0.4466 - val_acc: 0.7956
Epoch 2/20
100/100 [==============================] - 9s 91ms/step - loss: 0.4276 - acc: 0.7987 - val_loss: 0.3907 - val_acc: 0.8287
Epoch 3/20
100/100 [==============================] - 9s 86ms/step - loss: 0.3638 - acc: 0.8381 - val_loss: 0.3214 - val_acc: 0.8644oss: 0.374
Epoch 4/20
100/100 [==============================] - 11s 108ms/step - loss: 0.3397 - acc: 0.8556 - val_loss: 0.4314 - val_acc: 0.8075
Epoch 5/20
100/100 [==============================] - 9s 91ms/step - loss: 0.3105 - acc: 0.8713 - val_loss: 0.2993 - val_acc: 0.8762
Epoch 6/20
100/100 [==============================] - 9s 91ms/step - loss: 0.2758 - acc: 0.8931 - val_loss: 0.3221 - val_acc: 0.8798
Epoch 7/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3056 - acc: 0.8738 - val_loss: 0.2469 - val_acc: 0.90442s - los
Epoch 8/20
100/100 [==============================] - 5s 54ms/step - loss: 0.2992 - acc: 0.8762 - val_loss: 0.2494 - val_acc: 0.9006
Epoch 9/20
100/100 [==============================] - 6s 57ms/step - loss: 0.2619 - acc: 0.9031 - val_loss: 0.2372 - val_acc: 0.9069
Epoch 10/20
100/100 [==============================] - 7s 68ms/step - loss: 0.2584 - acc: 0.8856 - val_loss: 0.2755 - val_acc: 0.8950
Epoch 11/20
100/100 [==============================] - 7s 66ms/step - loss: 0.2717 - acc: 0.8978 - val_loss: 0.2293 - val_acc: 0.9105
Epoch 12/20
100/100 [==============================] - 7s 75ms/step - loss: 0.2247 - acc: 0.9163 - val_loss: 0.2271 - val_acc: 0.9100
Epoch 13/20
100/100 [==============================] - 7s 66ms/step - loss: 0.2363 - acc: 0.9012 - val_loss: 0.2654 - val_acc: 0.8950
Epoch 14/20
100/100 [==============================] - 7s 65ms/step - loss: 0.2202 - acc: 0.9150 - val_loss: 0.2358 - val_acc: 0.9125
Epoch 15/20
100/100 [==============================] - 7s 66ms/step - loss: 0.2500 - acc: 0.8925 - val_loss: 0.2398 - val_acc: 0.9087
Epoch 16/20
100/100 [==============================] - 6s 63ms/step - loss: 0.2053 - acc: 0.9287 - val_loss: 0.2413 - val_acc: 0.9099
Epoch 17/20
100/100 [==============================] - 7s 73ms/step - loss: 0.2333 - acc: 0.9056 - val_loss: 0.2535 - val_acc: 0.9019
Epoch 18/20
100/100 [==============================] - 7s 67ms/step - loss: 0.2320 - acc: 0.9075 - val_loss: 0.2299 - val_acc: 0.9025oss: 0.2228 
Epoch 19/20
100/100 [==============================] - 8s 79ms/step - loss: 0.2309 - acc: 0.9056 - val_loss: 0.2217 - val_acc: 0.9169
Epoch 20/20
100/100 [==============================] - 7s 70ms/step - loss: 0.2302 - acc: 0.9031 - val_loss: 0.2369 - val_acc: 0.9056
atch_size: 16

test accuracy: 0.9059375
test loss: 0.25391826398670675
Found 16315 images belonging to 2 classes.
data batch shape: (32, 64, 64, 3)
labels batch shape: (32,)
Epoch 1/20
100/100 [==============================] - 11s 106ms/step - loss: 0.5852 - acc: 0.6941 - val_loss: 0.4816 - val_acc: 0.8063
Epoch 2/20
100/100 [==============================] - 10s 98ms/step - loss: 0.3978 - acc: 0.8250 - val_loss: 0.4030 - val_acc: 0.8106
Epoch 3/20
100/100 [==============================] - 10s 100ms/step - loss: 0.3452 - acc: 0.8512 - val_loss: 0.2994 - val_acc: 0.8788
Epoch 4/20
100/100 [==============================] - 10s 98ms/step - loss: 0.3161 - acc: 0.8672 - val_loss: 0.3053 - val_acc: 0.8694
Epoch 5/20
100/100 [==============================] - 10s 96ms/step - loss: 0.2945 - acc: 0.8784 - val_loss: 0.3340 - val_acc: 0.8725
Epoch 6/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2898 - acc: 0.8772 - val_loss: 0.2512 - val_acc: 0.8961
Epoch 7/20
100/100 [==============================] - 9s 86ms/step - loss: 0.2710 - acc: 0.8853 - val_loss: 0.2539 - val_acc: 0.8994
Epoch 8/20
100/100 [==============================] - 8s 75ms/step - loss: 0.2440 - acc: 0.9018 - val_loss: 0.2788 - val_acc: 0.8825- acc: - ETA:
Epoch 9/20
100/100 [==============================] - 8s 81ms/step - loss: 0.2604 - acc: 0.9003 - val_loss: 0.2970 - val_acc: 0.8825
Epoch 10/20
100/100 [==============================] - 8s 77ms/step - loss: 0.2382 - acc: 0.9066 - val_loss: 0.2611 - val_acc: 0.8900
Epoch 11/20
100/100 [==============================] - 9s 85ms/step - loss: 0.2407 - acc: 0.8997 - val_loss: 0.2330 - val_acc: 0.9036
Epoch 12/20
100/100 [==============================] - 7s 75ms/step - loss: 0.2333 - acc: 0.9049 - val_loss: 0.2702 - val_acc: 0.8888loss: 0.2357 - acc: 0.9 - ETA: 0s - loss: 0.2340 - acc: 0.90
Epoch 13/20
100/100 [==============================] - 8s 80ms/step - loss: 0.2256 - acc: 0.9047 - val_loss: 0.2852 - val_acc: 0.8844
Epoch 14/20
100/100 [==============================] - 8s 83ms/step - loss: 0.2190 - acc: 0.9141 - val_loss: 0.2186 - val_acc: 0.9163
Epoch 15/20
100/100 [==============================] - 8s 81ms/step - loss: 0.2375 - acc: 0.9103 - val_loss: 0.2135 - val_acc: 0.9175
Epoch 16/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2112 - acc: 0.9169 - val_loss: 0.2690 - val_acc: 0.8874
Epoch 17/20
100/100 [==============================] - 8s 82ms/step - loss: 0.2048 - acc: 0.9175 - val_loss: 0.2235 - val_acc: 0.9081
Epoch 18/20
100/100 [==============================] - 7s 72ms/step - loss: 0.2083 - acc: 0.9131 - val_loss: 0.1974 - val_acc: 0.9300 ETA: 3s - loss: 0.1 - ETA: 2s - - ETA: 0s - loss: 0.2004 - acc - ETA: 0s - loss: 0.2083 - acc: 0.913
Epoch 19/20
100/100 [==============================] - 8s 76ms/step - loss: 0.2009 - acc: 0.9161 - val_loss: 0.2678 - val_acc: 0.8894
Epoch 20/20
100/100 [==============================] - 7s 74ms/step - loss: 0.2038 - acc: 0.9228 - val_loss: 0.2226 - val_acc: 0.9137
atch_size: 32

test accuracy: 0.9215625
test loss: 0.22294220104813575
Found 16315 images belonging to 2 classes.
data batch shape: (64, 64, 64, 3)
labels batch shape: (64,)
Epoch 1/20
100/100 [==============================] - 12s 116ms/step - loss: 0.5150 - acc: 0.7491 - val_loss: 0.3935 - val_acc: 0.8100
Epoch 2/20
100/100 [==============================] - 11s 113ms/step - loss: 0.3605 - acc: 0.8459 - val_loss: 0.2906 - val_acc: 0.8794
Epoch 3/20
100/100 [==============================] - 12s 117ms/step - loss: 0.3049 - acc: 0.8726 - val_loss: 0.2996 - val_acc: 0.8775
Epoch 4/20
100/100 [==============================] - 13s 125ms/step - loss: 0.2762 - acc: 0.8861 - val_loss: 0.2729 - val_acc: 0.8931
Epoch 5/20
100/100 [==============================] - 13s 135ms/step - loss: 0.2688 - acc: 0.8912 - val_loss: 0.2512 - val_acc: 0.8994
Epoch 6/20
100/100 [==============================] - 13s 135ms/step - loss: 0.2476 - acc: 0.9031 - val_loss: 0.2508 - val_acc: 0.8999
Epoch 7/20
100/100 [==============================] - 14s 138ms/step - loss: 0.2398 - acc: 0.9056 - val_loss: 0.2531 - val_acc: 0.8931
Epoch 8/20
100/100 [==============================] - 13s 134ms/step - loss: 0.2178 - acc: 0.9152 - val_loss: 0.2165 - val_acc: 0.9169
Epoch 9/20
100/100 [==============================] - 16s 156ms/step - loss: 0.2158 - acc: 0.9133 - val_loss: 0.2207 - val_acc: 0.9069
Epoch 10/20
100/100 [==============================] - 13s 133ms/step - loss: 0.2110 - acc: 0.9171 - val_loss: 0.1995 - val_acc: 0.9187
Epoch 11/20
100/100 [==============================] - 14s 136ms/step - loss: 0.2061 - acc: 0.9150 - val_loss: 0.1853 - val_acc: 0.9243
Epoch 12/20
100/100 [==============================] - 13s 133ms/step - loss: 0.1937 - acc: 0.9246 - val_loss: 0.2005 - val_acc: 0.9213
Epoch 13/20
100/100 [==============================] - 13s 133ms/step - loss: 0.1878 - acc: 0.9280 - val_loss: 0.1972 - val_acc: 0.9244
Epoch 14/20
100/100 [==============================] - 15s 145ms/step - loss: 0.1763 - acc: 0.9332 - val_loss: 0.2014 - val_acc: 0.9269
Epoch 15/20
100/100 [==============================] - 15s 152ms/step - loss: 0.1796 - acc: 0.9250 - val_loss: 0.2110 - val_acc: 0.9125
Epoch 16/20
100/100 [==============================] - 13s 135ms/step - loss: 0.1634 - acc: 0.9323 - val_loss: 0.2025 - val_acc: 0.9193
Epoch 17/20
100/100 [==============================] - 14s 137ms/step - loss: 0.1679 - acc: 0.9333 - val_loss: 0.1720 - val_acc: 0.9231
Epoch 18/20
100/100 [==============================] - 14s 137ms/step - loss: 0.1660 - acc: 0.9373 - val_loss: 0.2003 - val_acc: 0.9250
Epoch 19/20
100/100 [==============================] - 13s 131ms/step - loss: 0.1425 - acc: 0.9457 - val_loss: 0.1896 - val_acc: 0.9250
Epoch 20/20
100/100 [==============================] - 13s 132ms/step - loss: 0.1543 - acc: 0.9389 - val_loss: 0.2106 - val_acc: 0.9156
atch_size: 64

test accuracy: 0.9178125
test loss: 0.23080881215631963
Found 16315 images belonging to 2 classes.
data batch shape: (128, 64, 64, 3)
labels batch shape: (128,)
Epoch 1/20
100/100 [==============================] - 24s 236ms/step - loss: 0.5499 - acc: 0.7256 - val_loss: 0.3853 - val_acc: 0.8331
Epoch 2/20
100/100 [==============================] - 19s 185ms/step - loss: 0.3744 - acc: 0.8401 - val_loss: 0.3074 - val_acc: 0.8762
Epoch 3/20
100/100 [==============================] - 18s 178ms/step - loss: 0.3141 - acc: 0.8689 - val_loss: 0.2707 - val_acc: 0.8869
Epoch 4/20
100/100 [==============================] - 17s 172ms/step - loss: 0.2948 - acc: 0.8753 - val_loss: 0.2475 - val_acc: 0.9006
Epoch 5/20
100/100 [==============================] - 18s 185ms/step - loss: 0.2542 - acc: 0.8998 - val_loss: 0.2645 - val_acc: 0.8944
Epoch 6/20
100/100 [==============================] - 17s 170ms/step - loss: 0.2453 - acc: 0.9035 - val_loss: 0.2024 - val_acc: 0.9130
Epoch 7/20
100/100 [==============================] - 17s 173ms/step - loss: 0.2228 - acc: 0.9141 - val_loss: 0.2192 - val_acc: 0.9081
Epoch 8/20
100/100 [==============================] - 17s 166ms/step - loss: 0.2226 - acc: 0.9124 - val_loss: 0.2271 - val_acc: 0.9094
Epoch 9/20
100/100 [==============================] - 18s 180ms/step - loss: 0.2107 - acc: 0.9179 - val_loss: 0.1812 - val_acc: 0.9350
Epoch 10/20
100/100 [==============================] - 17s 173ms/step - loss: 0.1998 - acc: 0.9215 - val_loss: 0.2154 - val_acc: 0.9119
Epoch 11/20
100/100 [==============================] - 17s 165ms/step - loss: 0.1886 - acc: 0.9273 - val_loss: 0.1771 - val_acc: 0.9374
Epoch 12/20
100/100 [==============================] - 17s 173ms/step - loss: 0.1883 - acc: 0.9290 - val_loss: 0.2825 - val_acc: 0.8856
Epoch 13/20
100/100 [==============================] - 18s 182ms/step - loss: 0.1693 - acc: 0.9335 - val_loss: 0.1998 - val_acc: 0.9250
Epoch 14/20
100/100 [==============================] - 17s 166ms/step - loss: 0.1730 - acc: 0.9340 - val_loss: 0.1860 - val_acc: 0.9194
Epoch 15/20
100/100 [==============================] - 17s 171ms/step - loss: 0.1616 - acc: 0.9391 - val_loss: 0.2008 - val_acc: 0.9206
Epoch 16/20
100/100 [==============================] - 17s 171ms/step - loss: 0.1491 - acc: 0.9440 - val_loss: 0.2267 - val_acc: 0.9136
Epoch 17/20
100/100 [==============================] - 17s 169ms/step - loss: 0.1531 - acc: 0.9433 - val_loss: 0.1881 - val_acc: 0.9281
Epoch 18/20
100/100 [==============================] - 17s 171ms/step - loss: 0.1377 - acc: 0.9513 - val_loss: 0.1963 - val_acc: 0.91444s -
Epoch 19/20
100/100 [==============================] - 18s 180ms/step - loss: 0.1297 - acc: 0.9537 - val_loss: 0.2594 - val_acc: 0.9119
Epoch 20/20
100/100 [==============================] - 17s 165ms/step - loss: 0.1213 - acc: 0.9554 - val_loss: 0.1991 - val_acc: 0.9175
atch_size: 128

test accuracy: 0.9203125
test loss: 0.22447027273476125
Found 16315 images belonging to 2 classes.
data batch shape: (256, 64, 64, 3)
labels batch shape: (256,)
Epoch 1/20
100/100 [==============================] - 32s 321ms/step - loss: 0.4963 - acc: 0.7649 - val_loss: 0.4189 - val_acc: 0.7994
Epoch 2/20
100/100 [==============================] - 33s 329ms/step - loss: 0.3460 - acc: 0.8538 - val_loss: 0.2948 - val_acc: 0.8725
Epoch 3/20
100/100 [==============================] - 34s 339ms/step - loss: 0.2971 - acc: 0.8809 - val_loss: 0.2444 - val_acc: 0.8969
Epoch 4/20
100/100 [==============================] - 34s 336ms/step - loss: 0.2707 - acc: 0.8940 - val_loss: 0.2473 - val_acc: 0.9025
Epoch 5/20
100/100 [==============================] - 34s 339ms/step - loss: 0.2452 - acc: 0.9053 - val_loss: 0.2635 - val_acc: 0.8969
Epoch 6/20
100/100 [==============================] - 34s 339ms/step - loss: 0.2365 - acc: 0.9082 - val_loss: 0.2221 - val_acc: 0.9118
Epoch 7/20
100/100 [==============================] - 33s 330ms/step - loss: 0.2295 - acc: 0.9096 - val_loss: 0.2185 - val_acc: 0.9181
Epoch 8/20
100/100 [==============================] - 34s 336ms/step - loss: 0.2118 - acc: 0.9183 - val_loss: 0.1969 - val_acc: 0.9206
Epoch 9/20
100/100 [==============================] - 34s 341ms/step - loss: 0.2039 - acc: 0.9231 - val_loss: 0.2447 - val_acc: 0.8981
Epoch 10/20
100/100 [==============================] - 34s 338ms/step - loss: 0.1960 - acc: 0.9261 - val_loss: 0.1933 - val_acc: 0.9231
Epoch 11/20
100/100 [==============================] - 34s 337ms/step - loss: 0.1889 - acc: 0.9278 - val_loss: 0.2052 - val_acc: 0.9168
Epoch 12/20
100/100 [==============================] - 35s 346ms/step - loss: 0.1705 - acc: 0.9375 - val_loss: 0.1743 - val_acc: 0.9306
Epoch 13/20
100/100 [==============================] - 35s 350ms/step - loss: 0.1659 - acc: 0.9405 - val_loss: 0.3212 - val_acc: 0.8650
Epoch 14/20
100/100 [==============================] - 34s 341ms/step - loss: 0.1594 - acc: 0.9417 - val_loss: 0.1782 - val_acc: 0.9319
Epoch 15/20
100/100 [==============================] - 37s 366ms/step - loss: 0.1487 - acc: 0.9487 - val_loss: 0.2321 - val_acc: 0.9163
Epoch 16/20
100/100 [==============================] - 36s 362ms/step - loss: 0.1330 - acc: 0.9550 - val_loss: 0.2667 - val_acc: 0.8961
Epoch 17/20
100/100 [==============================] - 35s 355ms/step - loss: 0.1268 - acc: 0.9589 - val_loss: 0.2192 - val_acc: 0.9169
Epoch 18/20
100/100 [==============================] - 38s 382ms/step - loss: 0.1194 - acc: 0.9631 - val_loss: 0.2686 - val_acc: 0.9094
Epoch 19/20
100/100 [==============================] - 40s 401ms/step - loss: 0.1088 - acc: 0.9683 - val_loss: 0.2256 - val_acc: 0.9225
Epoch 20/20
100/100 [==============================] - 38s 383ms/step - loss: 0.1008 - acc: 0.9712 - val_loss: 0.2051 - val_acc: 0.9194
atch_size: 256

test accuracy: 0.915625
test loss: 0.2105775685235858

3.3.2 Parameter Tuning -- Determine the Optimal Learning Rate

The optimal CNN model for the iamge dataset obtained in Part 1 and Part 2 will be used in this Part 3. We will try different learning_rate: 0.1, 0.5, 1, 5, and 10.

In [11]:
def CNN_Parameter_Tuning_2 (activation_function_1, activation_function_2):
        
    K.clear_session()
    
    batch_size = 16
    
    train_generator_new = train_datagen.flow_from_directory(
            train_dir,
            target_size=(64, 64),
            batch_size = batch_size,
            class_mode='binary')
    
    learning_rate = [0.1, 0.5, 1, 5, 10]

    model = [0] * 5
    
    for j in range(0, 5):
        
        Adadelta = optimizers.Adadelta(lr=learning_rate[j], rho=0.95, epsilon=None, decay=0.0)
        
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        model[j].add(layers.Flatten())

        model[j].add(layers.Dense(16, kernel_initializer='glorot_uniform', activation=activation_function_1, 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc1'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc2'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].compile(loss='binary_crossentropy', optimizer = Adadelta, metrics=['accuracy'])
        
        # Fit the model        
        history = model[j].fit_generator(
        train_generator_new,
        steps_per_epoch=100,
        epochs=20,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('learning rate:', learning_rate[j])
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [12]:
CNN_Parameter_Tuning_2('relu', 'sigmoid')
Found 16315 images belonging to 2 classes.
Epoch 1/20
100/100 [==============================] - 11s 105ms/step - loss: 0.6210 - acc: 0.6694 - val_loss: 0.5831 - val_acc: 0.6825
Epoch 2/20
100/100 [==============================] - 10s 99ms/step - loss: 0.4925 - acc: 0.7841 - val_loss: 0.4527 - val_acc: 0.7875
Epoch 3/20
100/100 [==============================] - 9s 86ms/step - loss: 0.4310 - acc: 0.7987 - val_loss: 0.4124 - val_acc: 0.8213s - loss: 0.4342 - acc
Epoch 4/20
100/100 [==============================] - 8s 83ms/step - loss: 0.4134 - acc: 0.7969 - val_loss: 0.3830 - val_acc: 0.8275
Epoch 5/20
100/100 [==============================] - 10s 105ms/step - loss: 0.3672 - acc: 0.8400 - val_loss: 0.3554 - val_acc: 0.8319
Epoch 6/20
100/100 [==============================] - 10s 96ms/step - loss: 0.3758 - acc: 0.8312 - val_loss: 0.3354 - val_acc: 0.8642
Epoch 7/20
100/100 [==============================] - 11s 110ms/step - loss: 0.3607 - acc: 0.8450 - val_loss: 0.3275 - val_acc: 0.8569
Epoch 8/20
100/100 [==============================] - 11s 111ms/step - loss: 0.3294 - acc: 0.8581 - val_loss: 0.3187 - val_acc: 0.8625
Epoch 9/20
100/100 [==============================] - 6s 62ms/step - loss: 0.3409 - acc: 0.8612 - val_loss: 0.3246 - val_acc: 0.8656
Epoch 10/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3244 - acc: 0.8606 - val_loss: 0.3191 - val_acc: 0.8700
Epoch 11/20
100/100 [==============================] - 6s 62ms/step - loss: 0.3382 - acc: 0.8588 - val_loss: 0.3027 - val_acc: 0.8742
Epoch 12/20
100/100 [==============================] - 5s 54ms/step - loss: 0.3134 - acc: 0.8656 - val_loss: 0.3006 - val_acc: 0.8738
Epoch 13/20
100/100 [==============================] - 8s 80ms/step - loss: 0.2871 - acc: 0.8838 - val_loss: 0.3091 - val_acc: 0.8769
Epoch 14/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3037 - acc: 0.8650 - val_loss: 0.2840 - val_acc: 0.8894
Epoch 15/20
100/100 [==============================] - 7s 65ms/step - loss: 0.3059 - acc: 0.8713 - val_loss: 0.2869 - val_acc: 0.8781
Epoch 16/20
100/100 [==============================] - 6s 65ms/step - loss: 0.3043 - acc: 0.8819 - val_loss: 0.2964 - val_acc: 0.8767
Epoch 17/20
100/100 [==============================] - 6s 61ms/step - loss: 0.2877 - acc: 0.8769 - val_loss: 0.2674 - val_acc: 0.8931
Epoch 18/20
100/100 [==============================] - 6s 59ms/step - loss: 0.2904 - acc: 0.8787 - val_loss: 0.2939 - val_acc: 0.8819
Epoch 19/20
100/100 [==============================] - 8s 80ms/step - loss: 0.2904 - acc: 0.8826 - val_loss: 0.2770 - val_acc: 0.8900
Epoch 20/20
100/100 [==============================] - 9s 86ms/step - loss: 0.2823 - acc: 0.8806 - val_loss: 0.2802 - val_acc: 0.8894
learning rate: 0.1

test accuracy: 0.8921875
test loss: 0.27938420727849006
Epoch 1/20
100/100 [==============================] - 9s 92ms/step - loss: 0.5743 - acc: 0.7119 - val_loss: 0.4349 - val_acc: 0.7981
Epoch 2/20
100/100 [==============================] - 7s 73ms/step - loss: 0.4250 - acc: 0.8117 - val_loss: 0.3788 - val_acc: 0.8387
Epoch 3/20
100/100 [==============================] - 10s 103ms/step - loss: 0.3968 - acc: 0.8275 - val_loss: 0.4212 - val_acc: 0.8237
Epoch 4/20
100/100 [==============================] - 9s 91ms/step - loss: 0.3656 - acc: 0.8400 - val_loss: 0.3335 - val_acc: 0.8550
Epoch 5/20
100/100 [==============================] - 8s 84ms/step - loss: 0.3588 - acc: 0.8456 - val_loss: 0.3254 - val_acc: 0.8625
Epoch 6/20
100/100 [==============================] - 8s 83ms/step - loss: 0.3336 - acc: 0.8662 - val_loss: 0.2956 - val_acc: 0.8736
Epoch 7/20
100/100 [==============================] - 7s 71ms/step - loss: 0.3116 - acc: 0.8738 - val_loss: 0.3290 - val_acc: 0.8638oss: 0.3122 - acc: 0.87
Epoch 8/20
100/100 [==============================] - 7s 73ms/step - loss: 0.3082 - acc: 0.8738 - val_loss: 0.2574 - val_acc: 0.9006
Epoch 9/20
100/100 [==============================] - 13s 127ms/step - loss: 0.2937 - acc: 0.8787 - val_loss: 0.2601 - val_acc: 0.8938
Epoch 10/20
100/100 [==============================] - 15s 154ms/step - loss: 0.2675 - acc: 0.8900 - val_loss: 0.2999 - val_acc: 0.8794
Epoch 11/20
100/100 [==============================] - 11s 105ms/step - loss: 0.2651 - acc: 0.9006 - val_loss: 0.2499 - val_acc: 0.9068
Epoch 12/20
100/100 [==============================] - 13s 127ms/step - loss: 0.2764 - acc: 0.8901 - val_loss: 0.2403 - val_acc: 0.9094oss: 0.2814 - acc: 0. - ETA: 0s - loss: 0.2776 - acc: 0.889
Epoch 13/20
100/100 [==============================] - 14s 139ms/step - loss: 0.2400 - acc: 0.9006 - val_loss: 0.3290 - val_acc: 0.8644
Epoch 14/20
100/100 [==============================] - 10s 98ms/step - loss: 0.2496 - acc: 0.9044 - val_loss: 0.2637 - val_acc: 0.8862
Epoch 15/20
100/100 [==============================] - 9s 91ms/step - loss: 0.2464 - acc: 0.9025 - val_loss: 0.3748 - val_acc: 0.8400ss: 0.2459
Epoch 16/20
100/100 [==============================] - 9s 88ms/step - loss: 0.2639 - acc: 0.8975 - val_loss: 0.2309 - val_acc: 0.9118
Epoch 17/20
100/100 [==============================] - 10s 105ms/step - loss: 0.2461 - acc: 0.9069 - val_loss: 0.2289 - val_acc: 0.9062
Epoch 18/20
100/100 [==============================] - 9s 91ms/step - loss: 0.2353 - acc: 0.8969 - val_loss: 0.2469 - val_acc: 0.9050
Epoch 19/20
100/100 [==============================] - 9s 90ms/step - loss: 0.2442 - acc: 0.9131 - val_loss: 0.2220 - val_acc: 0.9150
Epoch 20/20
100/100 [==============================] - 10s 100ms/step - loss: 0.2399 - acc: 0.9094 - val_loss: 0.2287 - val_acc: 0.9062
learning rate: 0.5

test accuracy: 0.9103125
test loss: 0.2490104205161333
Epoch 1/20
100/100 [==============================] - 12s 120ms/step - loss: 0.5498 - acc: 0.7262 - val_loss: 0.4034 - val_acc: 0.8063
Epoch 2/20
100/100 [==============================] - 11s 105ms/step - loss: 0.4390 - acc: 0.8000 - val_loss: 0.3684 - val_acc: 0.8375
Epoch 3/20
100/100 [==============================] - 10s 96ms/step - loss: 0.3525 - acc: 0.8563 - val_loss: 0.4210 - val_acc: 0.8187 - loss: 0.3529 - ETA: 0s - loss: 0.3544 - acc: 0.85
Epoch 4/20
100/100 [==============================] - 14s 135ms/step - loss: 0.3501 - acc: 0.8462 - val_loss: 0.2879 - val_acc: 0.8881 1s - loss: 0.3459 - 
Epoch 5/20
100/100 [==============================] - 17s 173ms/step - loss: 0.2988 - acc: 0.8719 - val_loss: 0.3141 - val_acc: 0.8725
Epoch 6/20
100/100 [==============================] - 16s 160ms/step - loss: 0.3086 - acc: 0.8662 - val_loss: 0.3246 - val_acc: 0.8655-
Epoch 7/20
100/100 [==============================] - 12s 125ms/step - loss: 0.2990 - acc: 0.8750 - val_loss: 0.3323 - val_acc: 0.8806
Epoch 8/20
100/100 [==============================] - 10s 97ms/step - loss: 0.2716 - acc: 0.8856 - val_loss: 0.2483 - val_acc: 0.9000
Epoch 9/20
100/100 [==============================] - 11s 114ms/step - loss: 0.2754 - acc: 0.8888 - val_loss: 0.5319 - val_acc: 0.7937
Epoch 10/20
100/100 [==============================] - 11s 111ms/step - loss: 0.2635 - acc: 0.9031 - val_loss: 0.2416 - val_acc: 0.9062
Epoch 11/20
100/100 [==============================] - 9s 86ms/step - loss: 0.2526 - acc: 0.9081 - val_loss: 0.2247 - val_acc: 0.9111
Epoch 12/20
100/100 [==============================] - 10s 97ms/step - loss: 0.2426 - acc: 0.9012 - val_loss: 0.2590 - val_acc: 0.8944
Epoch 13/20
100/100 [==============================] - 9s 89ms/step - loss: 0.2386 - acc: 0.8988 - val_loss: 0.2705 - val_acc: 0.8981
Epoch 14/20
100/100 [==============================] - 11s 107ms/step - loss: 0.2334 - acc: 0.9175 - val_loss: 0.2518 - val_acc: 0.9062
Epoch 15/20
100/100 [==============================] - 10s 99ms/step - loss: 0.2378 - acc: 0.9019 - val_loss: 0.2971 - val_acc: 0.8950
Epoch 16/20
100/100 [==============================] - 11s 106ms/step - loss: 0.2540 - acc: 0.9056 - val_loss: 0.2531 - val_acc: 0.9011
Epoch 17/20
100/100 [==============================] - 10s 101ms/step - loss: 0.2352 - acc: 0.9119 - val_loss: 0.2341 - val_acc: 0.9119
Epoch 18/20
100/100 [==============================] - 11s 113ms/step - loss: 0.2594 - acc: 0.8981 - val_loss: 0.2691 - val_acc: 0.8969
Epoch 19/20
100/100 [==============================] - 10s 102ms/step - loss: 0.2457 - acc: 0.9012 - val_loss: 0.2384 - val_acc: 0.9106
Epoch 20/20
100/100 [==============================] - 11s 111ms/step - loss: 0.2301 - acc: 0.9075 - val_loss: 0.2940 - val_acc: 0.8931
learning rate: 1

test accuracy: 0.88875
test loss: 0.2884938246384263
Epoch 1/20
100/100 [==============================] - 12s 116ms/step - loss: 0.6760 - acc: 0.6162 - val_loss: 0.5791 - val_acc: 0.7550
Epoch 2/20
100/100 [==============================] - 10s 105ms/step - loss: 0.5642 - acc: 0.7319 - val_loss: 0.5029 - val_acc: 0.7919s - loss: 0.5663
Epoch 3/20
100/100 [==============================] - 11s 112ms/step - loss: 0.4861 - acc: 0.7981 - val_loss: 0.5526 - val_acc: 0.7788
Epoch 4/20
100/100 [==============================] - 9s 95ms/step - loss: 0.4538 - acc: 0.8200 - val_loss: 0.4201 - val_acc: 0.8300
Epoch 5/20
100/100 [==============================] - 10s 103ms/step - loss: 0.4333 - acc: 0.8269 - val_loss: 0.3880 - val_acc: 0.8444
Epoch 6/20
100/100 [==============================] - 11s 106ms/step - loss: 0.3736 - acc: 0.8469 - val_loss: 0.4187 - val_acc: 0.8129
Epoch 7/20
100/100 [==============================] - 10s 104ms/step - loss: 0.3960 - acc: 0.8500 - val_loss: 0.3355 - val_acc: 0.8731
Epoch 8/20
100/100 [==============================] - 14s 141ms/step - loss: 0.3580 - acc: 0.8606 - val_loss: 0.3449 - val_acc: 0.8606
Epoch 9/20
100/100 [==============================] - 11s 109ms/step - loss: 0.3577 - acc: 0.8656 - val_loss: 0.4282 - val_acc: 0.8419
Epoch 10/20
100/100 [==============================] - 9s 93ms/step - loss: 0.3074 - acc: 0.8700 - val_loss: 0.2806 - val_acc: 0.8994
Epoch 11/20
100/100 [==============================] - 10s 97ms/step - loss: 0.2680 - acc: 0.8894 - val_loss: 0.3134 - val_acc: 0.8911
Epoch 12/20
100/100 [==============================] - 9s 93ms/step - loss: 0.3050 - acc: 0.8769 - val_loss: 0.2744 - val_acc: 0.8794
Epoch 13/20
100/100 [==============================] - 10s 100ms/step - loss: 0.2546 - acc: 0.9000 - val_loss: 0.2534 - val_acc: 0.9025
Epoch 14/20
100/100 [==============================] - 10s 102ms/step - loss: 0.2576 - acc: 0.8988 - val_loss: 0.2358 - val_acc: 0.9019
Epoch 15/20
100/100 [==============================] - 9s 90ms/step - loss: 0.2392 - acc: 0.9081 - val_loss: 0.3211 - val_acc: 0.8838
Epoch 16/20
100/100 [==============================] - 9s 89ms/step - loss: 0.2398 - acc: 0.9006 - val_loss: 0.2416 - val_acc: 0.9024
Epoch 17/20
100/100 [==============================] - 9s 91ms/step - loss: 0.2833 - acc: 0.8850 - val_loss: 0.2649 - val_acc: 0.88812864 - acc: 0.86 - ETA: 3s - loss: 0.2808 - acc: - E - ETA: 0s - loss: 0.2806 - acc: 0.885
Epoch 18/20
100/100 [==============================] - 11s 109ms/step - loss: 0.2770 - acc: 0.8910 - val_loss: 0.2173 - val_acc: 0.9163
Epoch 19/20
100/100 [==============================] - 9s 95ms/step - loss: 0.2468 - acc: 0.9019 - val_loss: 0.2576 - val_acc: 0.9019
Epoch 20/20
100/100 [==============================] - 8s 85ms/step - loss: 0.2768 - acc: 0.8931 - val_loss: 0.2124 - val_acc: 0.9181
learning rate: 5

test accuracy: 0.92
test loss: 0.23465282700955867
Epoch 1/20
100/100 [==============================] - 11s 105ms/step - loss: 0.7994 - acc: 0.5000 - val_loss: 0.6915 - val_acc: 0.5387
Epoch 2/20
100/100 [==============================] - 10s 102ms/step - loss: 0.6915 - acc: 0.5363 - val_loss: 0.6942 - val_acc: 0.5169
Epoch 3/20
100/100 [==============================] - 9s 86ms/step - loss: 0.6907 - acc: 0.5406 - val_loss: 0.6956 - val_acc: 0.5031
Epoch 4/20
100/100 [==============================] - 8s 84ms/step - loss: 0.6930 - acc: 0.5244 - val_loss: 0.6940 - val_acc: 0.5088A: 0s - loss: 0.6930 - acc: 0.5
Epoch 5/20
100/100 [==============================] - 9s 91ms/step - loss: 0.6904 - acc: 0.5450 - val_loss: 0.6939 - val_acc: 0.5244
Epoch 6/20
100/100 [==============================] - 8s 84ms/step - loss: 0.6933 - acc: 0.5240 - val_loss: 0.6928 - val_acc: 0.5207
Epoch 7/20
100/100 [==============================] - 9s 89ms/step - loss: 0.6911 - acc: 0.5400 - val_loss: 0.6916 - val_acc: 0.5331
Epoch 8/20
100/100 [==============================] - 9s 85ms/step - loss: 0.6882 - acc: 0.5537 - val_loss: 0.6981 - val_acc: 0.5131
Epoch 9/20
100/100 [==============================] - 9s 94ms/step - loss: 0.6955 - acc: 0.5088 - val_loss: 0.6931 - val_acc: 0.5169
Epoch 10/20
100/100 [==============================] - 8s 85ms/step - loss: 0.6925 - acc: 0.5288 - val_loss: 0.6950 - val_acc: 0.5050
Epoch 11/20
100/100 [==============================] - 8s 80ms/step - loss: 0.6923 - acc: 0.5306 - val_loss: 0.6897 - val_acc: 0.5494
Epoch 12/20
100/100 [==============================] - 8s 76ms/step - loss: 0.6871 - acc: 0.5613 - val_loss: 0.7005 - val_acc: 0.4919
Epoch 13/20
100/100 [==============================] - 9s 89ms/step - loss: 0.6924 - acc: 0.5302 - val_loss: 0.6902 - val_acc: 0.54196892 - ETA: 4s -  - ETA: 1s - loss: 0.69
Epoch 14/20
100/100 [==============================] - 9s 90ms/step - loss: 0.6937 - acc: 0.5125 - val_loss: 0.6934 - val_acc: 0.5100s 
Epoch 15/20
100/100 [==============================] - 9s 87ms/step - loss: 0.6932 - acc: 0.5219 - val_loss: 0.6930 - val_acc: 0.5175A: 4s - l - ETA: 1s - los
Epoch 16/20
100/100 [==============================] - 8s 76ms/step - loss: 0.6904 - acc: 0.5450 - val_loss: 0.6952 - val_acc: 0.5106A: 4 - ETA: 2s - loss: 0 - ETA: 1s - loss: 0.6896 - acc:  - ETA: 1s - loss: 0.6894 - a - ETA: 0s - loss: 0.6901 - acc: 
Epoch 17/20
100/100 [==============================] - 8s 78ms/step - loss: 0.6938 - acc: 0.5162 - val_loss: 0.6923 - val_acc: 0.5306s: 0.
Epoch 18/20
100/100 [==============================] - 8s 83ms/step - loss: 0.6927 - acc: 0.5256 - val_loss: 0.6929 - val_acc: 0.5200
Epoch 19/20
100/100 [==============================] - 8s 84ms/step - loss: 0.6905 - acc: 0.5431 - val_loss: 0.6955 - val_acc: 0.5062
Epoch 20/20
100/100 [==============================] - 8s 80ms/step - loss: 0.6915 - acc: 0.5363 - val_loss: 0.6946 - val_acc: 0.5106
learning rate: 10

test accuracy: 0.5328125
test loss: 0.6913891983032227

3.3.3 Parameter Tuning -- Determine the Adadelta Decay Factor

The optimal CNN model for the iamge dataset obtained in Part 1 and Part 2 will be used. We will try different decay factor (i.e., the fraction of gradient to keep at each time step), including 0, 0.1, 0.5, 0.9, and 1.

In [16]:
def CNN_Parameter_Tuning_3 (activation_function_1, activation_function_2):
        
    K.clear_session()
    
    decay_factor = [0, 0.1, 0.5, 0.9, 1]

    model = [0] * 5
    
    for j in range(0, 5):
        
        Adadelta = optimizers.Adadelta(lr=0.1, rho = decay_factor[j], epsilon=None, decay=0.0)
        
        model[j] = models.Sequential()
        model[j].add(layers.Conv2D(16, (3, 3), padding='same', activation=activation_function_1,
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool1'))
        model[j].add(layers.Conv2D(32, (3, 3), padding='same', activation=activation_function_1, name = 'conv2'))
        model[j].add(layers.MaxPooling2D((2, 2), name='max_pool2'))
        model[j].add(layers.Flatten())

        model[j].add(layers.Dense(16, kernel_initializer='glorot_uniform', activation=activation_function_1, 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc1'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].add(layers.Dense(1, kernel_initializer='glorot_uniform', activation=activation_function_2, 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc2'))
        model[j].add(layers.Dropout(0, noise_shape=None, seed=None))
        
        model[j].compile(loss='binary_crossentropy', optimizer = Adadelta, metrics=['accuracy'])
        
        # Fit the model        
        history = model[j].fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=20,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1, 
        # callbacks=[TrainValTensorBoard("logs/{}".format(time()), write_graph=True)]
        )
        test_loss, test_acc = model[j].evaluate_generator(test_generator, steps=100)
        print('decay factor:', decay_factor[j])
        print('\ntest accuracy:', test_acc)
        print('test loss:', test_loss)
    
        acc = history.history['acc']
        val_acc = history.history['val_acc']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(len(acc))
    
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'g-', label='Validation acc')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Accuracy")
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()

        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'g-', label='Validation loss')
        plt.xlabel("Num of Epochs")
        plt.ylabel("Loss")
        plt.title('Training and validation loss')
        plt.legend()
        
        plt.show()
In [17]:
CNN_Parameter_Tuning_3('relu', 'sigmoid')
Epoch 1/20
100/100 [==============================] - 14s 144ms/step - loss: 0.6312 - acc: 0.6787 - val_loss: 0.5659 - val_acc: 0.7238
Epoch 2/20
100/100 [==============================] - 14s 145ms/step - loss: 0.5432 - acc: 0.7343 - val_loss: 0.5999 - val_acc: 0.6625
Epoch 3/20
100/100 [==============================] - 10s 98ms/step - loss: 0.4967 - acc: 0.7547 - val_loss: 0.4852 - val_acc: 0.7625
Epoch 4/20
100/100 [==============================] - 11s 113ms/step - loss: 0.4977 - acc: 0.7550 - val_loss: 0.4640 - val_acc: 0.7850
Epoch 5/20
100/100 [==============================] - 11s 113ms/step - loss: 0.4751 - acc: 0.7662 - val_loss: 0.3974 - val_acc: 0.8350
Epoch 6/20
100/100 [==============================] - 12s 119ms/step - loss: 0.4806 - acc: 0.7662 - val_loss: 0.4450 - val_acc: 0.7929
Epoch 7/20
100/100 [==============================] - 12s 116ms/step - loss: 0.4873 - acc: 0.7634 - val_loss: 0.4479 - val_acc: 0.7819
Epoch 8/20
100/100 [==============================] - 12s 123ms/step - loss: 0.4711 - acc: 0.7797 - val_loss: 0.5459 - val_acc: 0.7350
Epoch 9/20
100/100 [==============================] - 13s 127ms/step - loss: 0.4883 - acc: 0.7639 - val_loss: 0.5118 - val_acc: 0.7656
Epoch 10/20
100/100 [==============================] - 15s 146ms/step - loss: 0.4901 - acc: 0.7666 - val_loss: 0.4871 - val_acc: 0.7725
Epoch 11/20
100/100 [==============================] - 14s 141ms/step - loss: 0.4984 - acc: 0.7678 - val_loss: 0.4568 - val_acc: 0.7997
Epoch 12/20
100/100 [==============================] - 12s 125ms/step - loss: 0.4390 - acc: 0.7891 - val_loss: 0.6806 - val_acc: 0.7031
Epoch 13/20
100/100 [==============================] - 11s 112ms/step - loss: 0.4167 - acc: 0.8128 - val_loss: 0.4787 - val_acc: 0.7831
Epoch 14/20
100/100 [==============================] - 13s 128ms/step - loss: 0.4403 - acc: 0.7980 - val_loss: 0.5212 - val_acc: 0.7531
Epoch 15/20
100/100 [==============================] - 14s 136ms/step - loss: 0.5132 - acc: 0.7503 - val_loss: 0.5193 - val_acc: 0.7612
Epoch 16/20
100/100 [==============================] - 17s 165ms/step - loss: 0.4827 - acc: 0.7750 - val_loss: 0.4334 - val_acc: 0.7941
Epoch 17/20
100/100 [==============================] - 15s 149ms/step - loss: 0.4904 - acc: 0.7834 - val_loss: 0.4621 - val_acc: 0.7994
Epoch 18/20
100/100 [==============================] - 20s 198ms/step - loss: 0.4621 - acc: 0.7873 - val_loss: 0.3790 - val_acc: 0.8369
Epoch 19/20
100/100 [==============================] - 17s 167ms/step - loss: 0.4432 - acc: 0.8044 - val_loss: 0.4262 - val_acc: 0.7981
Epoch 20/20
100/100 [==============================] - 14s 139ms/step - loss: 0.4144 - acc: 0.8166 - val_loss: 0.3705 - val_acc: 0.8275
decay factor: 0

test accuracy: 0.8353125
test loss: 0.3726767022907734
Epoch 1/20
100/100 [==============================] - 10s 96ms/step - loss: 0.6364 - acc: 0.6850 - val_loss: 0.5871 - val_acc: 0.7400
Epoch 2/20
100/100 [==============================] - 8s 85ms/step - loss: 0.5485 - acc: 0.7528 - val_loss: 0.5071 - val_acc: 0.7825
Epoch 3/20
100/100 [==============================] - 8s 80ms/step - loss: 0.4978 - acc: 0.7740 - val_loss: 0.4720 - val_acc: 0.7788
Epoch 4/20
100/100 [==============================] - 8s 77ms/step - loss: 0.4547 - acc: 0.7931 - val_loss: 0.4403 - val_acc: 0.8044505 - acc: 
Epoch 5/20
100/100 [==============================] - 8s 81ms/step - loss: 0.4414 - acc: 0.7972 - val_loss: 0.4462 - val_acc: 0.7844
Epoch 6/20
100/100 [==============================] - 7s 71ms/step - loss: 0.4026 - acc: 0.8247 - val_loss: 0.3994 - val_acc: 0.8354
Epoch 7/20
100/100 [==============================] - 8s 75ms/step - loss: 0.4016 - acc: 0.8200 - val_loss: 0.3816 - val_acc: 0.8325
Epoch 8/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3787 - acc: 0.8341 - val_loss: 0.3761 - val_acc: 0.8313
Epoch 9/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3634 - acc: 0.8491 - val_loss: 0.3551 - val_acc: 0.8387
Epoch 10/20
100/100 [==============================] - 7s 70ms/step - loss: 0.3534 - acc: 0.8444 - val_loss: 0.3362 - val_acc: 0.8612
Epoch 11/20
100/100 [==============================] - 9s 93ms/step - loss: 0.3488 - acc: 0.8497 - val_loss: 0.3291 - val_acc: 0.8686
Epoch 12/20
100/100 [==============================] - 8s 76ms/step - loss: 0.3457 - acc: 0.8524 - val_loss: 0.3261 - val_acc: 0.8619
Epoch 13/20
100/100 [==============================] - 10s 95ms/step - loss: 0.3393 - acc: 0.8588 - val_loss: 0.3491 - val_acc: 0.8494
Epoch 14/20
100/100 [==============================] - 8s 77ms/step - loss: 0.3406 - acc: 0.8500 - val_loss: 0.3281 - val_acc: 0.8588
Epoch 15/20
100/100 [==============================] - 8s 79ms/step - loss: 0.3253 - acc: 0.8656 - val_loss: 0.3448 - val_acc: 0.8494
Epoch 16/20
100/100 [==============================] - 8s 76ms/step - loss: 0.3226 - acc: 0.8597 - val_loss: 0.3095 - val_acc: 0.8667
Epoch 17/20
100/100 [==============================] - 8s 76ms/step - loss: 0.3310 - acc: 0.8588 - val_loss: 0.3652 - val_acc: 0.8394
Epoch 18/20
100/100 [==============================] - 7s 69ms/step - loss: 0.3300 - acc: 0.8625 - val_loss: 0.3064 - val_acc: 0.8894
Epoch 19/20
100/100 [==============================] - 8s 78ms/step - loss: 0.3102 - acc: 0.8691 - val_loss: 0.3003 - val_acc: 0.8700
Epoch 20/20
100/100 [==============================] - 9s 91ms/step - loss: 0.3084 - acc: 0.8725 - val_loss: 0.2953 - val_acc: 0.8844
decay factor: 0.1

test accuracy: 0.88125
test loss: 0.30139229983091353
Epoch 1/20
100/100 [==============================] - 12s 116ms/step - loss: 0.6410 - acc: 0.6878 - val_loss: 0.5900 - val_acc: 0.7494
Epoch 2/20
100/100 [==============================] - 11s 109ms/step - loss: 0.5199 - acc: 0.7876 - val_loss: 0.5004 - val_acc: 0.7706
Epoch 3/20
100/100 [==============================] - 10s 101ms/step - loss: 0.4750 - acc: 0.7784 - val_loss: 0.4322 - val_acc: 0.8100
Epoch 4/20
100/100 [==============================] - 9s 86ms/step - loss: 0.4184 - acc: 0.8213 - val_loss: 0.3892 - val_acc: 0.8438
Epoch 5/20
100/100 [==============================] - 9s 93ms/step - loss: 0.4041 - acc: 0.8256 - val_loss: 0.3722 - val_acc: 0.8531
Epoch 6/20
100/100 [==============================] - 10s 96ms/step - loss: 0.3718 - acc: 0.8397 - val_loss: 0.3555 - val_acc: 0.8523
Epoch 7/20
100/100 [==============================] - 9s 95ms/step - loss: 0.3473 - acc: 0.8566 - val_loss: 0.3309 - val_acc: 0.8638
Epoch 8/20
100/100 [==============================] - 11s 112ms/step - loss: 0.3448 - acc: 0.8528 - val_loss: 0.3236 - val_acc: 0.8688
Epoch 9/20
100/100 [==============================] - 10s 96ms/step - loss: 0.3413 - acc: 0.8534 - val_loss: 0.3529 - val_acc: 0.8512
Epoch 10/20
100/100 [==============================] - 8s 82ms/step - loss: 0.3332 - acc: 0.8561 - val_loss: 0.3180 - val_acc: 0.8706
Epoch 11/20
100/100 [==============================] - 9s 92ms/step - loss: 0.3158 - acc: 0.8709 - val_loss: 0.2853 - val_acc: 0.8886
Epoch 12/20
100/100 [==============================] - 10s 96ms/step - loss: 0.3074 - acc: 0.8747 - val_loss: 0.3320 - val_acc: 0.8569
Epoch 13/20
100/100 [==============================] - 8s 81ms/step - loss: 0.3254 - acc: 0.8678 - val_loss: 0.3235 - val_acc: 0.8575
Epoch 14/20
100/100 [==============================] - 9s 88ms/step - loss: 0.3204 - acc: 0.8619 - val_loss: 0.2956 - val_acc: 0.8744
Epoch 15/20
100/100 [==============================] - 10s 102ms/step - loss: 0.3138 - acc: 0.8717 - val_loss: 0.3136 - val_acc: 0.8756
Epoch 16/20
100/100 [==============================] - 9s 92ms/step - loss: 0.2952 - acc: 0.8753 - val_loss: 0.3186 - val_acc: 0.8692
Epoch 17/20
100/100 [==============================] - 8s 82ms/step - loss: 0.2999 - acc: 0.8744 - val_loss: 0.3094 - val_acc: 0.8788
Epoch 18/20
100/100 [==============================] - 8s 80ms/step - loss: 0.3056 - acc: 0.8792 - val_loss: 0.2915 - val_acc: 0.8750
Epoch 19/20
100/100 [==============================] - 9s 90ms/step - loss: 0.2952 - acc: 0.8716 - val_loss: 0.2769 - val_acc: 0.8975
Epoch 20/20
100/100 [==============================] - 8s 79ms/step - loss: 0.2981 - acc: 0.8772 - val_loss: 0.2944 - val_acc: 0.8719
decay factor: 0.5

test accuracy: 0.888125
test loss: 0.2908728568255901
Epoch 1/20
100/100 [==============================] - 10s 100ms/step - loss: 0.6177 - acc: 0.6863 - val_loss: 0.5608 - val_acc: 0.7137
Epoch 2/20
100/100 [==============================] - 9s 89ms/step - loss: 0.5205 - acc: 0.7506 - val_loss: 0.4732 - val_acc: 0.7762
Epoch 3/20
100/100 [==============================] - 8s 82ms/step - loss: 0.4638 - acc: 0.7791 - val_loss: 0.4367 - val_acc: 0.7981 loss: 0.4620 - acc:  - ETA: 1s - loss: 0.4606 - - ETA: 1s - loss: 0.4647 - acc: 0. - ETA: 0s - loss: 0.4611 
Epoch 4/20
100/100 [==============================] - 9s 90ms/step - loss: 0.4264 - acc: 0.8034 - val_loss: 0.4056 - val_acc: 0.8175
Epoch 5/20
100/100 [==============================] - 8s 79ms/step - loss: 0.4086 - acc: 0.8131 - val_loss: 0.3956 - val_acc: 0.8200
Epoch 6/20
100/100 [==============================] - 10s 100ms/step - loss: 0.3987 - acc: 0.8225 - val_loss: 0.3674 - val_acc: 0.8392
Epoch 7/20
100/100 [==============================] - 10s 101ms/step - loss: 0.3668 - acc: 0.8312 - val_loss: 0.3628 - val_acc: 0.8394
Epoch 8/20
100/100 [==============================] - 9s 90ms/step - loss: 0.3791 - acc: 0.8287 - val_loss: 0.3747 - val_acc: 0.8313
Epoch 9/20
100/100 [==============================] - 9s 93ms/step - loss: 0.3474 - acc: 0.8516 - val_loss: 0.3431 - val_acc: 0.8481
Epoch 10/20
100/100 [==============================] - 9s 91ms/step - loss: 0.3303 - acc: 0.8619 - val_loss: 0.3285 - val_acc: 0.8612
Epoch 11/20
100/100 [==============================] - 10s 99ms/step - loss: 0.3270 - acc: 0.8578 - val_loss: 0.3173 - val_acc: 0.8692
Epoch 12/20
100/100 [==============================] - 10s 97ms/step - loss: 0.3167 - acc: 0.8681 - val_loss: 0.3136 - val_acc: 0.8644
Epoch 13/20
100/100 [==============================] - 9s 93ms/step - loss: 0.3308 - acc: 0.8581 - val_loss: 0.3846 - val_acc: 0.8200
Epoch 14/20
100/100 [==============================] - 10s 104ms/step - loss: 0.3209 - acc: 0.8691 - val_loss: 0.3160 - val_acc: 0.8675
Epoch 15/20
100/100 [==============================] - 10s 97ms/step - loss: 0.3280 - acc: 0.8616 - val_loss: 0.3110 - val_acc: 0.8706
Epoch 16/20
100/100 [==============================] - 10s 97ms/step - loss: 0.3204 - acc: 0.8681 - val_loss: 0.3042 - val_acc: 0.8673
Epoch 17/20
100/100 [==============================] - 11s 109ms/step - loss: 0.2994 - acc: 0.8819 - val_loss: 0.2964 - val_acc: 0.8750
Epoch 18/20
100/100 [==============================] - 10s 97ms/step - loss: 0.3097 - acc: 0.8662 - val_loss: 0.2809 - val_acc: 0.8931
Epoch 19/20
100/100 [==============================] - 11s 114ms/step - loss: 0.2934 - acc: 0.8798 - val_loss: 0.3049 - val_acc: 0.8762
Epoch 20/20
100/100 [==============================] - 12s 121ms/step - loss: 0.2899 - acc: 0.8794 - val_loss: 0.2753 - val_acc: 0.8962
decay factor: 0.9

test accuracy: 0.89125
test loss: 0.28298124015331266
Epoch 1/20
100/100 [==============================] - 10s 105ms/step - loss: 0.6331 - acc: 0.6675 - val_loss: 0.5415 - val_acc: 0.7375
Epoch 2/20
100/100 [==============================] - 8s 80ms/step - loss: 0.4853 - acc: 0.7741 - val_loss: 0.4229 - val_acc: 0.8081
Epoch 3/20
100/100 [==============================] - 8s 82ms/step - loss: 0.4129 - acc: 0.8103 - val_loss: 0.5177 - val_acc: 0.7600
Epoch 4/20
100/100 [==============================] - 7s 72ms/step - loss: 0.3559 - acc: 0.8369 - val_loss: 0.3944 - val_acc: 0.8063
Epoch 5/20
100/100 [==============================] - 8s 80ms/step - loss: 0.3386 - acc: 0.8506 - val_loss: 0.3522 - val_acc: 0.8562
Epoch 6/20
100/100 [==============================] - 10s 96ms/step - loss: 0.3187 - acc: 0.8563 - val_loss: 0.2725 - val_acc: 0.8849
Epoch 7/20
100/100 [==============================] - 11s 107ms/step - loss: 0.3037 - acc: 0.8669 - val_loss: 0.2869 - val_acc: 0.8831
Epoch 8/20
100/100 [==============================] - 14s 136ms/step - loss: 0.2909 - acc: 0.8691 - val_loss: 0.2805 - val_acc: 0.8831
Epoch 9/20
100/100 [==============================] - 12s 117ms/step - loss: 0.2880 - acc: 0.8766 - val_loss: 0.2600 - val_acc: 0.8981
Epoch 10/20
100/100 [==============================] - 10s 97ms/step - loss: 0.2704 - acc: 0.8908 - val_loss: 0.3021 - val_acc: 0.8669
Epoch 11/20
100/100 [==============================] - 11s 112ms/step - loss: 0.2505 - acc: 0.8981 - val_loss: 0.5098 - val_acc: 0.7691
Epoch 12/20
100/100 [==============================] - 11s 109ms/step - loss: 0.2494 - acc: 0.8984 - val_loss: 0.2869 - val_acc: 0.8775
Epoch 13/20
100/100 [==============================] - 9s 93ms/step - loss: 0.2361 - acc: 0.9059 - val_loss: 0.2202 - val_acc: 0.9050
Epoch 14/20
100/100 [==============================] - 10s 104ms/step - loss: 0.2451 - acc: 0.8944 - val_loss: 0.2632 - val_acc: 0.8931
Epoch 15/20
100/100 [==============================] - 12s 122ms/step - loss: 0.2396 - acc: 0.8991 - val_loss: 0.2100 - val_acc: 0.9169
Epoch 16/20
100/100 [==============================] - 12s 116ms/step - loss: 0.2243 - acc: 0.9044 - val_loss: 0.2116 - val_acc: 0.9212
Epoch 17/20
100/100 [==============================] - 10s 103ms/step - loss: 0.2246 - acc: 0.9012 - val_loss: 0.2809 - val_acc: 0.8775
Epoch 18/20
100/100 [==============================] - 11s 107ms/step - loss: 0.1989 - acc: 0.9193 - val_loss: 0.2575 - val_acc: 0.8962
Epoch 19/20
100/100 [==============================] - 9s 91ms/step - loss: 0.2117 - acc: 0.9125 - val_loss: 0.2317 - val_acc: 0.9100
Epoch 20/20
100/100 [==============================] - 9s 94ms/step - loss: 0.2220 - acc: 0.9041 - val_loss: 0.2709 - val_acc: 0.8894
decay factor: 1

test accuracy: 0.875625
test loss: 0.3235200372338295

3.4 The Interpretation of Results for Parameter Tuning

The number of optimal batch size: Based on the accuracy and loss of training, validation, and test sets, we found that the optimal batch size is 16. Increasing the batch size does NOT apparently improve the results, which is not worth the additional computational cost. Besides, the optimal learning rate for the Adadelta optimizer is 0.1, and the optimal decay factor for the Adadelta optimizer is 1.

Assess the best model on the test data.

In [8]:
K.clear_session()

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(64, 64),
    batch_size = 16,
    class_mode='binary'
)


for train_data_batch, train_labels_batch in train_generator:
    print('data batch shape:', train_data_batch.shape)
    print('labels batch shape:', train_labels_batch.shape)
    break
    
Adadelta = optimizers.Adadelta(lr=0.1, rho = 1, epsilon=None, decay=0.0)
        
model = models.Sequential()
model.add(layers.Conv2D(16, (3, 3), padding='same', activation='relu',
                            input_shape=train_data_batch.shape[1:], name = 'conv1'))
        
model.add(layers.MaxPooling2D((2, 2), name='max_pool1'))
model.add(layers.Conv2D(32, (3, 3), padding='same', activation='relu', name = 'conv2'))
model.add(layers.MaxPooling2D((2, 2), name='max_pool2'))
model.add(layers.Flatten())

model.add(layers.Dense(16, kernel_initializer='glorot_uniform', activation='relu', 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc1'))
model.add(layers.Dropout(0, noise_shape=None, seed=None))
        
model.add(layers.Dense(1, kernel_initializer='glorot_uniform', activation='sigmoid', 
                                  kernel_regularizer=keras.regularizers.l2(0.00001), activity_regularizer=keras.regularizers.l1(0.00001), name='fc2'))
model.add(layers.Dropout(0, noise_shape=None, seed=None))
        
model.compile(loss='binary_crossentropy', optimizer = Adadelta, metrics=['accuracy'])
        
# Fit the model        
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=40,
    validation_data=validation_generator,
    validation_steps=50,
    verbose=1, 
)

prediction = model.predict_generator(test_generator, steps=len(test_generator))
Found 16315 images belonging to 2 classes.
data batch shape: (16, 64, 64, 3)
labels batch shape: (16,)
Epoch 1/40
100/100 [==============================] - 12s 123ms/step - loss: 0.6077 - acc: 0.6781 - val_loss: 0.7376 - val_acc: 0.5169
Epoch 2/40
100/100 [==============================] - 7s 72ms/step - loss: 0.5595 - acc: 0.7100 - val_loss: 0.4708 - val_acc: 0.7538
Epoch 3/40
100/100 [==============================] - 7s 74ms/step - loss: 0.4652 - acc: 0.7800 - val_loss: 0.4334 - val_acc: 0.8106
Epoch 4/40
100/100 [==============================] - 10s 96ms/step - loss: 0.4044 - acc: 0.8022 - val_loss: 0.5124 - val_acc: 0.7206
Epoch 5/40
100/100 [==============================] - 12s 116ms/step - loss: 0.3915 - acc: 0.8044 - val_loss: 0.3226 - val_acc: 0.8556
Epoch 6/40
100/100 [==============================] - 8s 76ms/step - loss: 0.3839 - acc: 0.8312 - val_loss: 0.3594 - val_acc: 0.8423
Epoch 7/40
100/100 [==============================] - 8s 82ms/step - loss: 0.3587 - acc: 0.8331 - val_loss: 0.3176 - val_acc: 0.8500
Epoch 8/40
100/100 [==============================] - 8s 76ms/step - loss: 0.3462 - acc: 0.8394 - val_loss: 0.3081 - val_acc: 0.8556
Epoch 9/40
100/100 [==============================] - 8s 77ms/step - loss: 0.3786 - acc: 0.8256 - val_loss: 0.3771 - val_acc: 0.8581
Epoch 10/40
100/100 [==============================] - 7s 70ms/step - loss: 0.3471 - acc: 0.8569 - val_loss: 0.3121 - val_acc: 0.8612
Epoch 11/40
100/100 [==============================] - 6s 62ms/step - loss: 0.3346 - acc: 0.8519 - val_loss: 0.3460 - val_acc: 0.8329
Epoch 12/40
100/100 [==============================] - 8s 78ms/step - loss: 0.3177 - acc: 0.8588 - val_loss: 0.2892 - val_acc: 0.8688
Epoch 13/40
100/100 [==============================] - 9s 90ms/step - loss: 0.3109 - acc: 0.8681 - val_loss: 0.3507 - val_acc: 0.8512
Epoch 14/40
100/100 [==============================] - 6s 64ms/step - loss: 0.2874 - acc: 0.8781 - val_loss: 0.2987 - val_acc: 0.8600
Epoch 15/40
100/100 [==============================] - 7s 65ms/step - loss: 0.2915 - acc: 0.8844 - val_loss: 0.3062 - val_acc: 0.8856
Epoch 16/40
100/100 [==============================] - 7s 69ms/step - loss: 0.2778 - acc: 0.8744 - val_loss: 0.2551 - val_acc: 0.8842
Epoch 17/40
100/100 [==============================] - 8s 78ms/step - loss: 0.2789 - acc: 0.8794 - val_loss: 0.2527 - val_acc: 0.8938
Epoch 18/40
100/100 [==============================] - 7s 67ms/step - loss: 0.2615 - acc: 0.8994 - val_loss: 0.2453 - val_acc: 0.8988
Epoch 19/40
100/100 [==============================] - 7s 70ms/step - loss: 0.2864 - acc: 0.8825 - val_loss: 0.2695 - val_acc: 0.8950
Epoch 20/40
100/100 [==============================] - 6s 57ms/step - loss: 0.2856 - acc: 0.8725 - val_loss: 0.3191 - val_acc: 0.8588
Epoch 21/40
100/100 [==============================] - 6s 60ms/step - loss: 0.2672 - acc: 0.8863 - val_loss: 0.2263 - val_acc: 0.9080
Epoch 22/40
100/100 [==============================] - 6s 60ms/step - loss: 0.2490 - acc: 0.8950 - val_loss: 0.3148 - val_acc: 0.8794
Epoch 23/40
100/100 [==============================] - 5s 55ms/step - loss: 0.2358 - acc: 0.9056 - val_loss: 0.2352 - val_acc: 0.8944
Epoch 24/40
100/100 [==============================] - 9s 88ms/step - loss: 0.2615 - acc: 0.8910 - val_loss: 0.2710 - val_acc: 0.8956
Epoch 25/40
100/100 [==============================] - 18s 181ms/step - loss: 0.2329 - acc: 0.9019 - val_loss: 0.2305 - val_acc: 0.9144
Epoch 26/40
100/100 [==============================] - 12s 123ms/step - loss: 0.2444 - acc: 0.8925 - val_loss: 0.4568 - val_acc: 0.7872
Epoch 27/40
100/100 [==============================] - 6s 56ms/step - loss: 0.2443 - acc: 0.8888 - val_loss: 0.2149 - val_acc: 0.9137
Epoch 28/40
100/100 [==============================] - 6s 57ms/step - loss: 0.2421 - acc: 0.9012 - val_loss: 0.2148 - val_acc: 0.9125
Epoch 29/40
100/100 [==============================] - 6s 57ms/step - loss: 0.2504 - acc: 0.8969 - val_loss: 0.2841 - val_acc: 0.8744
Epoch 30/40
100/100 [==============================] - 9s 90ms/step - loss: 0.2120 - acc: 0.9100 - val_loss: 0.2411 - val_acc: 0.9050
Epoch 31/40
100/100 [==============================] - 6s 63ms/step - loss: 0.2394 - acc: 0.8950 - val_loss: 0.2375 - val_acc: 0.9036
Epoch 32/40
100/100 [==============================] - 7s 67ms/step - loss: 0.2123 - acc: 0.9181 - val_loss: 0.2585 - val_acc: 0.8950
Epoch 33/40
100/100 [==============================] - 6s 62ms/step - loss: 0.2063 - acc: 0.9106 - val_loss: 0.2562 - val_acc: 0.8912
Epoch 34/40
100/100 [==============================] - 5s 54ms/step - loss: 0.1957 - acc: 0.9154 - val_loss: 0.2217 - val_acc: 0.9200
Epoch 35/40
100/100 [==============================] - 6s 58ms/step - loss: 0.2259 - acc: 0.9044 - val_loss: 0.2450 - val_acc: 0.8981
Epoch 36/40
100/100 [==============================] - 5s 49ms/step - loss: 0.2289 - acc: 0.9119 - val_loss: 0.2312 - val_acc: 0.9111
Epoch 37/40
100/100 [==============================] - 6s 56ms/step - loss: 0.1926 - acc: 0.9206 - val_loss: 0.2493 - val_acc: 0.9106
Epoch 38/40
100/100 [==============================] - 5s 50ms/step - loss: 0.2111 - acc: 0.9081 - val_loss: 0.2519 - val_acc: 0.8981
Epoch 39/40
100/100 [==============================] - 4s 36ms/step - loss: 0.1957 - acc: 0.9131 - val_loss: 0.2180 - val_acc: 0.9106
Epoch 40/40
100/100 [==============================] - 7s 72ms/step - loss: 0.2113 - acc: 0.9087 - val_loss: 0.2301 - val_acc: 0.9125
In [9]:
# Change the predictions on the test set to a list "prediction_list". 
new_prediction = []
for i in range (0, len(prediction)):
    new_prediction.append(prediction[i][0])
prediction_list = []
for predict_value in new_prediction:
    if predict_value >= 0.5:
        prediction_list.append(1)
    else:
        prediction_list.append(0)
        
# Collect all of the labels in the test set into a list "test_value_list".
test_value_list = []
for i in range(0 , len(test_generator)):
    test_value_list.extend(test_generator[i][1].tolist())

(A) The corresponding AUC curve

In [10]:
# Plot the AUC
fpr, tpr, threshold = metrics.roc_curve(test_value_list, new_prediction)
roc_auc = metrics.auc(fpr, tpr)

plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()

The AUC curve of the assessment of the best model on the test data is demonstated above. The value of AUC is 0.97, which represents a good test.

(B) The number of misclassified Images (on the test set data)

In [14]:
misclassified_portrait = []
misclassified_landscape = []
for i in range (0, len(prediction)):
    if test_value_list[i] == prediction_list[i]:
        pass
    elif prediction_list[i] == 0:  # Misclassify portrait as landsapce
        misclassified_portrait.append(i)
    else:                         # Misclassify landsapce as portrait
        misclassified_landscape.append(i)
        
print('The number of portrait misclassified as landsapce', len(misclassified_portrait))
print('The number of landscape misclassified as portrait', len(misclassified_landscape))
The number of portrait misclassified as landsapce 510
The number of landscape misclassified as portrait 229